#include "arg.h"
#include "common.h"
#include "ngram-cache.h"
#include "llama.h"

#include <string>
#include <vector>

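// Builds a static n-gram cache from the tokenized prompt and saves it to
// params.lookup_cache_static for later use by the lookup-based examples.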
int main(int argc, char ** argv){
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
        return 1;
    }

    // init llama.cpp
    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model
    common_init_result llama_init = common_init_from_params(params);

    llama_model_ptr & model = llama_init.model;
    llama_context_ptr & ctx = llama_init.context;

    GGML_ASSERT(model != nullptr);

    // tokenize the prompt
    std::vector<llama_token> inp;
    inp = common_tokenize(ctx.get(), params.prompt, /*add_special=*/ true, /*parse_special=*/ true);
    fprintf(stderr, "%s: tokenization done\n", __func__);

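    // hash the n-grams of size LLAMA_NGRAM_STATIC from the tokenized input into the cache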
    common_ngram_cache ngram_cache;
    common_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, /*nnew=*/ inp.size(), /*print_progress=*/ true);
    fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str());

    common_ngram_cache_save(ngram_cache, params.lookup_cache_static);

    return 0;
}