// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"
#include "get-model.h"

// This creates a new context inside a std::thread and then tries to exit cleanly.
int main(int argc, char ** argv) {
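    // get_model_or_exit() comes from the get-model.h test helper; in the
    // upstream repo it resolves the model path from argv (falling back to the
    // LLAMACPP_TEST_MODELFILE environment variable) and skips the test when
    // no model is available.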
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
        llama_backend_init();
        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_init_from_model(model, llama_context_default_params());
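        // NOTE: return values are left unchecked to keep the test minimal;
        // both calls return nullptr on failure.

        // Tear down in reverse order of creation: context, model, backend.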
        llama_free(ctx);
        llama_model_free(model);
        llama_backend_free();
    }).join();

    return 0;
}