1#ifndef LLAMA_H
2#define LLAMA_H
3
4#include "ggml.h"
5#include "ggml-cpu.h"
6#include "ggml-backend.h"
7#include "ggml-opt.h"
8
9#include <stddef.h>
10#include <stdint.h>
11#include <stdio.h>
12#include <stdbool.h>
13
14#ifdef LLAMA_SHARED
15# if defined(_WIN32) && !defined(__MINGW32__)
16# ifdef LLAMA_BUILD
17# define LLAMA_API __declspec(dllexport)
18# else
19# define LLAMA_API __declspec(dllimport)
20# endif
21# else
22# define LLAMA_API __attribute__ ((visibility ("default")))
23# endif
24#else
25# define LLAMA_API
26#endif
27
28#ifdef __GNUC__
29# define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
30#elif defined(_MSC_VER)
31# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
32#else
33# define DEPRECATED(func, hint) func
34#endif
35
36#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
37
38#define LLAMA_TOKEN_NULL -1
39
40#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
41#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
42#define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
43
44#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
45#define LLAMA_SESSION_VERSION 9
46
47#define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
48#define LLAMA_STATE_SEQ_VERSION 2
49
50#ifdef __cplusplus
51extern "C" {
52#endif
53
54 //
55 // C interface
56 //
57 // TODO: show sample usage
58 //
59
60 struct llama_vocab;
61 struct llama_model;
62 struct llama_context;
63 struct llama_sampler;
64
65 typedef struct llama_memory_i * llama_memory_t;
66
67 typedef int32_t llama_pos;
68 typedef int32_t llama_token;
69 typedef int32_t llama_seq_id;
70
71 enum llama_vocab_type {
72 LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
73 LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
74 LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
75 LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
76 LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram
77 LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization
78 LLAMA_VOCAB_TYPE_PLAMO2 = 6, // PLaMo-2 tokenizer based on Aho-Corasick with dynamic programming
79 };
80
81 enum llama_rope_type {
82 LLAMA_ROPE_TYPE_NONE = -1,
83 LLAMA_ROPE_TYPE_NORM = 0,
84 LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX,
85 LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE,
86 LLAMA_ROPE_TYPE_IMROPE = GGML_ROPE_TYPE_IMROPE,
87 LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION,
88 };
89
90 enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
91 LLAMA_TOKEN_TYPE_UNDEFINED = 0,
92 LLAMA_TOKEN_TYPE_NORMAL = 1,
93 LLAMA_TOKEN_TYPE_UNKNOWN = 2,
94 LLAMA_TOKEN_TYPE_CONTROL = 3,
95 LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
96 LLAMA_TOKEN_TYPE_UNUSED = 5,
97 LLAMA_TOKEN_TYPE_BYTE = 6,
98 };
99
100 enum llama_token_attr {
101 LLAMA_TOKEN_ATTR_UNDEFINED = 0,
102 LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
103 LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
104 LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
105 LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
106 LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
107 LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
108 LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
109 LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
110 LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
111 LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
112 };
113
114 // model file types
115 enum llama_ftype {
116 LLAMA_FTYPE_ALL_F32 = 0,
117 LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
118 LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
119 LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
120 // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
121 // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
122 // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
123 LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
124 LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
125 LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
126 LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
127 LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
128 LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
129 LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
130 LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
131 LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
132 LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
133 LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
134 LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
135 LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
136 LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
137 LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
138 LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
139 LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
140 LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
141 LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
142 LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
143 LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
144 LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
145 LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
146 LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
147 LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
148 LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
149 //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack
150 //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack
151 //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack
152 LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors
153 LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors
154 LLAMA_FTYPE_MOSTLY_MXFP4_MOE = 38, // except 1d tensors
155
156 LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
157 };
158
159 enum llama_rope_scaling_type {
160 LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
161 LLAMA_ROPE_SCALING_TYPE_NONE = 0,
162 LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
163 LLAMA_ROPE_SCALING_TYPE_YARN = 2,
164 LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3,
165 LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
166 };
167
168 enum llama_pooling_type {
169 LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
170 LLAMA_POOLING_TYPE_NONE = 0,
171 LLAMA_POOLING_TYPE_MEAN = 1,
172 LLAMA_POOLING_TYPE_CLS = 2,
173 LLAMA_POOLING_TYPE_LAST = 3,
174 LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph
175 };
176
177 enum llama_attention_type {
178 LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
179 LLAMA_ATTENTION_TYPE_CAUSAL = 0,
180 LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1,
181 };
182
183 enum llama_flash_attn_type {
184 LLAMA_FLASH_ATTN_TYPE_AUTO = -1,
185 LLAMA_FLASH_ATTN_TYPE_DISABLED = 0,
186 LLAMA_FLASH_ATTN_TYPE_ENABLED = 1,
187 };
188
189 LLAMA_API const char * llama_flash_attn_type_name(enum llama_flash_attn_type flash_attn_type);
190
191 enum llama_split_mode {
192 LLAMA_SPLIT_MODE_NONE = 0, // single GPU
193 LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
194 LLAMA_SPLIT_MODE_ROW = 2, // split layers and KV across GPUs, use tensor parallelism if supported
195 };
196
197 // TODO: simplify (https://github.com/ggml-org/llama.cpp/pull/9294#pullrequestreview-2286561979)
198 typedef struct llama_token_data {
199 llama_token id; // token id
200 float logit; // log-odds of the token
201 float p; // probability of the token
202 } llama_token_data;
203
204 typedef struct llama_token_data_array {
205 // TODO: consider SoA
206 // NOTE: this pointer can be modified by the samplers
207 llama_token_data * data;
208 size_t size;
209 int64_t selected; // this is the index in the data array (i.e. not the token id)
210 bool sorted; // note: do not assume the data is sorted - always check this flag
211 } llama_token_data_array;
212
213 typedef bool (*llama_progress_callback)(float progress, void * user_data);
214
215 // Input data for llama_encode/llama_decode
216 // A llama_batch object can contain input about one or many sequences
217 // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
218 //
219 // - token : the token ids of the input (used when embd is NULL)
220 // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
221 // - pos : the positions of the respective token in the sequence
222 // (if set to NULL, the token position will be tracked automatically by llama_encode/llama_decode)
223 // - seq_id : the sequence to which the respective token belongs
224 // (if set to NULL, the sequence ID will be assumed to be 0)
225 // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
226 // (if set to NULL:
227 // - if embeddings: all tokens are output
228 // - if not: only the last token is output
229 // )
230 //
231 typedef struct llama_batch {
232 int32_t n_tokens;
233
234 llama_token * token;
235 float * embd;
236 llama_pos * pos;
237 int32_t * n_seq_id;
238 llama_seq_id ** seq_id;
239 int8_t * logits; // TODO: rename this to "output"
240 } llama_batch;
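
    // Example (a minimal sketch, token values are placeholders): building a batch of
    // 3 tokens for sequence 0 with llama_batch_init() (declared below) and requesting
    // the output only for the last token:
    //
    //    llama_batch batch = llama_batch_init(/*n_tokens*/ 3, /*embd*/ 0, /*n_seq_max*/ 1);
    //
    //    const llama_token prompt[3] = { 1, 15043, 3186 }; // placeholder token ids
    //
    //    batch.n_tokens = 3;
    //    for (int i = 0; i < batch.n_tokens; ++i) {
    //        batch.token   [i]    = prompt[i];
    //        batch.pos     [i]    = i;    // position within the sequence
    //        batch.n_seq_id[i]    = 1;    // the token belongs to a single sequence ...
    //        batch.seq_id  [i][0] = 0;    // ... namely sequence 0
    //        batch.logits  [i]    = (i == batch.n_tokens - 1); // output only for the last token
    //    }
    //
    //    // ... llama_decode(ctx, batch) ...
    //
    //    llama_batch_free(batch);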
241
242 enum llama_model_kv_override_type {
243 LLAMA_KV_OVERRIDE_TYPE_INT,
244 LLAMA_KV_OVERRIDE_TYPE_FLOAT,
245 LLAMA_KV_OVERRIDE_TYPE_BOOL,
246 LLAMA_KV_OVERRIDE_TYPE_STR,
247 };
248
249 struct llama_model_kv_override {
250 enum llama_model_kv_override_type tag;
251
252 char key[128];
253
254 union {
255 int64_t val_i64;
256 double val_f64;
257 bool val_bool;
258 char val_str[128];
259 };
260 };
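
    // Example (a minimal sketch; the empty-key terminator mirrors how the helpers in
    // common/ use this field and should be treated as an assumption): overriding a
    // boolean metadata key at load time:
    //
    //    struct llama_model_kv_override kvo[2];
    //    memset(kvo, 0, sizeof(kvo)); // kvo[1].key[0] == '\0' terminates the list
    //
    //    snprintf(kvo[0].key, sizeof(kvo[0].key), "%s", "tokenizer.ggml.add_bos_token");
    //    kvo[0].tag      = LLAMA_KV_OVERRIDE_TYPE_BOOL;
    //    kvo[0].val_bool = false;
    //
    //    struct llama_model_params mparams = llama_model_default_params();
    //    mparams.kv_overrides = kvo;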
261
262 struct llama_model_tensor_buft_override {
263 const char * pattern;
264 ggml_backend_buffer_type_t buft;
265 };
266
267 struct llama_model_params {
268 // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
269 ggml_backend_dev_t * devices;
270
271 // NULL-terminated list of buffer types to use for tensors that match a pattern
272 const struct llama_model_tensor_buft_override * tensor_buft_overrides;
273
274 int32_t n_gpu_layers; // number of layers to store in VRAM
275 enum llama_split_mode split_mode; // how to split the model across multiple GPUs
276
277 // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
278 int32_t main_gpu;
279
280 // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
281 const float * tensor_split;
282
283 // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
284 // If the provided progress_callback returns true, model loading continues.
285 // If it returns false, model loading is immediately aborted.
286 llama_progress_callback progress_callback;
287
288 // context pointer passed to the progress callback
289 void * progress_callback_user_data;
290
291 // override key-value pairs of the model meta data
292 const struct llama_model_kv_override * kv_overrides;
293
294 // Keep the booleans together to avoid misalignment during copy-by-value.
295 bool vocab_only; // only load the vocabulary, no weights
296 bool use_mmap; // use mmap if possible
297 bool use_mlock; // force system to keep model in RAM
298 bool check_tensors; // validate model tensor data
299 bool use_extra_bufts; // use extra buffer types (used for weight repacking)
300 bool no_host; // bypass host buffer allowing extra buffers to be used
301 };
302
303 // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
304 // https://github.com/ggml-org/llama.cpp/pull/7544
305 struct llama_context_params {
306 uint32_t n_ctx; // text context, 0 = from model
307 uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
308 uint32_t n_ubatch; // physical maximum batch size
309 uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
310 int32_t n_threads; // number of threads to use for generation
311 int32_t n_threads_batch; // number of threads to use for batch processing
312
313 enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
314 enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
315 enum llama_attention_type attention_type; // attention type to use for embeddings
316 enum llama_flash_attn_type flash_attn_type; // when to enable Flash Attention
317
318 // ref: https://github.com/ggml-org/llama.cpp/pull/2054
319 float rope_freq_base; // RoPE base frequency, 0 = from model
320 float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
321 float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
322 float yarn_attn_factor; // YaRN magnitude scaling factor
323 float yarn_beta_fast; // YaRN low correction dim
324 float yarn_beta_slow; // YaRN high correction dim
325 uint32_t yarn_orig_ctx; // YaRN original context size
326 float defrag_thold; // [DEPRECATED] defragment the KV cache if holes/size > thold, <= 0 disabled (default)
327
328 ggml_backend_sched_eval_callback cb_eval;
329 void * cb_eval_user_data;
330
331 enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
332 enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]
333
334 // Abort callback
335 // if it returns true, execution of llama_decode() will be aborted
336 // currently works only with CPU execution
337 ggml_abort_callback abort_callback;
338 void * abort_callback_data;
339
340 // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
341 bool embeddings; // if true, extract embeddings (together with logits)
342 bool offload_kqv; // offload the KQV ops (including the KV cache) to GPU
        bool no_perf;     // disable performance timing measurements
344 bool op_offload; // offload host tensor operations to device
345 bool swa_full; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
346 // NOTE: setting to false when n_seq_max > 1 can cause bad performance in some cases
347 // ref: https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573
348 bool kv_unified; // use a unified buffer across the input sequences when computing the attention
349 // try to disable when n_seq_max > 1 for improved performance when the sequences do not share a large prefix
350 // ref: https://github.com/ggml-org/llama.cpp/pull/14363
351 };
352
353 // model quantization parameters
354 typedef struct llama_model_quantize_params {
355 int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
356 enum llama_ftype ftype; // quantize to this llama_ftype
357 enum ggml_type output_tensor_type; // output tensor type
358 enum ggml_type token_embedding_type; // token embeddings tensor type
359 bool allow_requantize; // allow quantizing non-f32/f16 tensors
360 bool quantize_output_tensor; // quantize output.weight
361 bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
362 bool pure; // quantize all tensors to the default type
363 bool keep_split; // quantize to the same number of shards
364 void * imatrix; // pointer to importance matrix data
365 void * kv_overrides; // pointer to vector containing overrides
366 void * tensor_types; // pointer to vector containing tensor types
367 void * prune_layers; // pointer to vector containing layer indices to prune
368 } llama_model_quantize_params;
369
370 typedef struct llama_logit_bias {
371 llama_token token;
372 float bias;
373 } llama_logit_bias;
374
375 typedef struct llama_sampler_chain_params {
        bool no_perf; // disable performance timing measurements
377 } llama_sampler_chain_params;
378
379 // used in chat template
380 typedef struct llama_chat_message {
381 const char * role;
382 const char * content;
383 } llama_chat_message;
384
385 // lora adapter
386 struct llama_adapter_lora;
387
388 // Helpers for getting default parameters
389 // TODO: update API to start accepting pointers to params structs (https://github.com/ggml-org/llama.cpp/discussions/9172)
390 LLAMA_API struct llama_model_params llama_model_default_params(void);
391 LLAMA_API struct llama_context_params llama_context_default_params(void);
392 LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
393 LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
394
395 // Initialize the llama + ggml backend
396 // If numa is true, use NUMA optimizations
397 // Call once at the start of the program
398 LLAMA_API void llama_backend_init(void);
399
400 // Call once at the end of the program - currently only used for MPI
401 LLAMA_API void llama_backend_free(void);
402
    // Optional:
404 LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
405
406 // Optional: an auto threadpool gets created in ggml if not passed explicitly
407 LLAMA_API void llama_attach_threadpool(
408 struct llama_context * ctx,
409 ggml_threadpool_t threadpool,
410 ggml_threadpool_t threadpool_batch);
411
412 LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
413
414 DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
415 const char * path_model,
416 struct llama_model_params params),
417 "use llama_model_load_from_file instead");
418
419 // Load the model from a file
420 // If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
421 // If the split file name does not follow this pattern, use llama_model_load_from_splits
422 LLAMA_API struct llama_model * llama_model_load_from_file(
423 const char * path_model,
424 struct llama_model_params params);
425
426 // Load the model from multiple splits (support custom naming scheme)
427 // The paths must be in the correct order
428 LLAMA_API struct llama_model * llama_model_load_from_splits(
429 const char ** paths,
430 size_t n_paths,
431 struct llama_model_params params);
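
    // Example (a minimal sketch, the path is a placeholder): loading a model with
    // default parameters:
    //
    //    struct llama_model_params mparams = llama_model_default_params();
    //    mparams.n_gpu_layers = 99; // offload as many layers as possible (assumes a GPU backend is available)
    //
    //    struct llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    //    if (model == NULL) {
    //        // loading failed
    //    }
    //
    //    // ... use the model ...
    //
    //    llama_model_free(model);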
432
433 LLAMA_API void llama_model_save_to_file(
434 const struct llama_model * model,
435 const char * path_model);
436
437 DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
438 "use llama_model_free instead");
439
440 LLAMA_API void llama_model_free(struct llama_model * model);
441
442 LLAMA_API struct llama_context * llama_init_from_model(
443 struct llama_model * model,
444 struct llama_context_params params);
445
446 DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model(
447 struct llama_model * model,
448 struct llama_context_params params),
449 "use llama_init_from_model instead");
450
451 // Frees all allocated memory
452 LLAMA_API void llama_free(struct llama_context * ctx);
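
    // Example (a minimal sketch, parameter values are illustrative only): creating and
    // freeing a context for a previously loaded model:
    //
    //    struct llama_context_params cparams = llama_context_default_params();
    //    cparams.n_ctx     = 4096; // 0 = take the context size from the model
    //    cparams.n_batch   = 512;
    //    cparams.n_threads = 8;
    //
    //    struct llama_context * ctx = llama_init_from_model(model, cparams);
    //    if (ctx == NULL) {
    //        // context creation failed
    //    }
    //
    //    // ... llama_encode()/llama_decode() ...
    //
    //    llama_free(ctx);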
453
454 LLAMA_API int64_t llama_time_us(void);
455
456 LLAMA_API size_t llama_max_devices(void);
457 LLAMA_API size_t llama_max_parallel_sequences(void);
458
459 LLAMA_API bool llama_supports_mmap (void);
460 LLAMA_API bool llama_supports_mlock (void);
461 LLAMA_API bool llama_supports_gpu_offload(void);
462 LLAMA_API bool llama_supports_rpc (void);
463
464 // NOTE: After creating a llama_context, it is recommended to query the actual values using these functions
465 // In some cases the requested values via llama_context_params may differ from the actual values used by the context
466 // ref: https://github.com/ggml-org/llama.cpp/pull/17046#discussion_r2503085732
467 LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
468 LLAMA_API uint32_t llama_n_ctx_seq (const struct llama_context * ctx);
469 LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
470 LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
471 LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
472
473 DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead");
474 DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead");
475 DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead");
476 DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama_model_n_head instead");
477
478 DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead");
479
480 LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx);
481 LLAMA_API llama_memory_t llama_get_memory (const struct llama_context * ctx);
482 LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); // TODO: rename to llama_get_pooling_type
483
484 LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model);
485 LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model);
486
487 LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model);
488 LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model);
489 LLAMA_API int32_t llama_model_n_embd_inp (const struct llama_model * model);
490 LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model);
491 LLAMA_API int32_t llama_model_n_head (const struct llama_model * model);
492 LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
493 LLAMA_API int32_t llama_model_n_swa (const struct llama_model * model);
494
495 // Get the model's RoPE frequency scaling factor
496 LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model);
497
498 // Returns the number of classifier outputs (only valid for classifier models)
499 // Undefined behavior for non-classifier models
500 LLAMA_API uint32_t llama_model_n_cls_out(const struct llama_model * model);
501
502 // Returns label of classifier output by index (<n_cls_out). Returns nullptr if no label provided
503 LLAMA_API const char * llama_model_cls_label(const struct llama_model * model, uint32_t i);
504
505 LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_vocab * vocab);
506
507 LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab);
508
509 // Functions to access the model's GGUF metadata scalar values
510 // - The functions return the length of the string on success, or -1 on failure
511 // - The output string is always null-terminated and cleared on failure
512 // - When retrieving a string, an extra byte must be allocated to account for the null terminator
513 // - GGUF array values are not supported by these functions
514
515 // Get metadata value as a string by key name
516 LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
517
518 // Get the number of metadata key/value pairs
519 LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
520
521 // Get metadata key name by index
522 LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
523
524 // Get metadata value as a string by index
525 LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
526
527 // Get a string describing the model type
528 LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
529
530 // Returns the total size of all the tensors in the model in bytes
531 LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
532
533 // Get the default chat template. Returns nullptr if not available
534 // If name is NULL, returns the default chat template
535 LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name);
536
537 // Returns the total number of parameters in the model
538 LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
539
540 // Returns true if the model contains an encoder that requires llama_encode() call
541 LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
542
543 // Returns true if the model contains a decoder that requires llama_decode() call
544 LLAMA_API bool llama_model_has_decoder(const struct llama_model * model);
545
546 // For encoder-decoder models, this function returns id of the token that must be provided
547 // to the decoder to start generating output sequence. For other models, it returns -1.
548 LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
549
550 // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
551 LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
552
553 // Returns true if the model is hybrid (like Jamba, Granite, etc.)
554 LLAMA_API bool llama_model_is_hybrid(const struct llama_model * model);
555
556 // Returns true if the model is diffusion-based (like LLaDA, Dream, etc.)
557 LLAMA_API bool llama_model_is_diffusion(const struct llama_model * model);
558
559 // Returns 0 on success
560 LLAMA_API uint32_t llama_model_quantize(
561 const char * fname_inp,
562 const char * fname_out,
563 const llama_model_quantize_params * params);
564
565 //
566 // Adapters
567 //
568
569 // Load a LoRA adapter from file
570 LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init(
571 struct llama_model * model,
572 const char * path_lora);
573
574 // Functions to access the adapter's GGUF metadata scalar values
575 // - The functions return the length of the string on success, or -1 on failure
576 // - The output string is always null-terminated and cleared on failure
577 // - When retrieving a string, an extra byte must be allocated to account for the null terminator
578 // - GGUF array values are not supported by these functions
579
580 // Get metadata value as a string by key name
581 LLAMA_API int32_t llama_adapter_meta_val_str(const struct llama_adapter_lora * adapter, const char * key, char * buf, size_t buf_size);
582
583 // Get the number of metadata key/value pairs
584 LLAMA_API int32_t llama_adapter_meta_count(const struct llama_adapter_lora * adapter);
585
586 // Get metadata key name by index
587 LLAMA_API int32_t llama_adapter_meta_key_by_index(const struct llama_adapter_lora * adapter, int32_t i, char * buf, size_t buf_size);
588
589 // Get metadata value as a string by index
590 LLAMA_API int32_t llama_adapter_meta_val_str_by_index(const struct llama_adapter_lora * adapter, int32_t i, char * buf, size_t buf_size);
591
592 // Manually free a LoRA adapter
    // NOTE: loaded adapters will be freed when the associated model is deleted
594 LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter);
595
596 // Get the invocation tokens if the current lora is an alora
597 LLAMA_API uint64_t llama_adapter_get_alora_n_invocation_tokens(const struct llama_adapter_lora * adapter);
598 LLAMA_API const llama_token * llama_adapter_get_alora_invocation_tokens (const struct llama_adapter_lora * adapter);
599
600 // The following functions operate on a llama_context, hence the naming: llama_verb_...
601
602 // Add a loaded LoRA adapter to given context
603 // This will not modify model's weight
604 LLAMA_API int32_t llama_set_adapter_lora(
605 struct llama_context * ctx,
606 struct llama_adapter_lora * adapter,
607 float scale);
608
609 // Remove a specific LoRA adapter from given context
610 // Return -1 if the adapter is not present in the context
611 LLAMA_API int32_t llama_rm_adapter_lora(
612 struct llama_context * ctx,
613 struct llama_adapter_lora * adapter);
614
615 // Remove all LoRA adapters from given context
616 LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx);
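
    // Example (a minimal sketch; the adapter path is a placeholder, model and ctx are
    // assumed to exist): loading a LoRA adapter and attaching it to a context:
    //
    //    struct llama_adapter_lora * adapter = llama_adapter_lora_init(model, "adapter.gguf");
    //    if (adapter != NULL) {
    //        llama_set_adapter_lora(ctx, adapter, /*scale*/ 1.0f);
    //    }
    //
    //    // ... generate with the adapter applied ...
    //
    //    llama_rm_adapter_lora(ctx, adapter); // or llama_clear_adapter_lora(ctx)
    //    // the adapter is freed together with the model, or earlier via llama_adapter_lora_free()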
617
618 // Apply a loaded control vector to a llama_context, or if data is NULL, clear
619 // the currently loaded vector.
620 // n_embd should be the size of a single layer's control, and data should point
621 // to an n_embd x n_layers buffer starting from layer 1.
622 // il_start and il_end are the layer range the vector should apply to (both inclusive)
623 // See llama_control_vector_load in common to load a control vector.
624 LLAMA_API int32_t llama_apply_adapter_cvec(
625 struct llama_context * ctx,
626 const float * data,
627 size_t len,
628 int32_t n_embd,
629 int32_t il_start,
630 int32_t il_end);
631
632 //
633 // Memory
634 //
635
636 // Clear the memory contents
637 // If data == true, the data buffers will also be cleared together with the metadata
638 LLAMA_API void llama_memory_clear(
639 llama_memory_t mem,
640 bool data);
641
642 // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
643 // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
644 // seq_id < 0 : match any sequence
645 // p0 < 0 : [0, p1]
646 // p1 < 0 : [p0, inf)
647 LLAMA_API bool llama_memory_seq_rm(
648 llama_memory_t mem,
649 llama_seq_id seq_id,
650 llama_pos p0,
651 llama_pos p1);
652
653 // Copy all tokens that belong to the specified sequence to another sequence
654 // p0 < 0 : [0, p1]
655 // p1 < 0 : [p0, inf)
656 LLAMA_API void llama_memory_seq_cp(
657 llama_memory_t mem,
658 llama_seq_id seq_id_src,
659 llama_seq_id seq_id_dst,
660 llama_pos p0,
661 llama_pos p1);
662
663 // Removes all tokens that do not belong to the specified sequence
664 LLAMA_API void llama_memory_seq_keep(
665 llama_memory_t mem,
666 llama_seq_id seq_id);
667
668 // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
669 // p0 < 0 : [0, p1]
670 // p1 < 0 : [p0, inf)
671 LLAMA_API void llama_memory_seq_add(
672 llama_memory_t mem,
673 llama_seq_id seq_id,
674 llama_pos p0,
675 llama_pos p1,
676 llama_pos delta);
677
678 // Integer division of the positions by factor of `d > 1`
679 // p0 < 0 : [0, p1]
680 // p1 < 0 : [p0, inf)
681 LLAMA_API void llama_memory_seq_div(
682 llama_memory_t mem,
683 llama_seq_id seq_id,
684 llama_pos p0,
685 llama_pos p1,
686 int d);
687
688 // Returns the smallest position present in the memory for the specified sequence
689 // This is typically non-zero only for SWA caches
690 // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the memory
691 // Return -1 if the sequence is empty
692 LLAMA_API llama_pos llama_memory_seq_pos_min(
693 llama_memory_t mem,
694 llama_seq_id seq_id);
695
696 // Returns the largest position present in the memory for the specified sequence
697 // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the memory
698 // Return -1 if the sequence is empty
699 LLAMA_API llama_pos llama_memory_seq_pos_max(
700 llama_memory_t mem,
701 llama_seq_id seq_id);
702
703 // Check if the memory supports shifting
704 LLAMA_API bool llama_memory_can_shift(llama_memory_t mem);
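
    // Example (a minimal sketch): common memory manipulations for a single sequence:
    //
    //    llama_memory_t mem = llama_get_memory(ctx);
    //
    //    // drop everything from position 100 onward in sequence 0 (e.g. when rewinding generation)
    //    llama_memory_seq_rm(mem, /*seq_id*/ 0, /*p0*/ 100, /*p1*/ -1);
    //
    //    // fork sequence 0 into sequence 1, sharing the existing prefix
    //    llama_memory_seq_cp(mem, /*seq_id_src*/ 0, /*seq_id_dst*/ 1, /*p0*/ -1, /*p1*/ -1);
    //
    //    const llama_pos last = llama_memory_seq_pos_max(mem, 0); // -1 if the sequence is empty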
705
706 //
707 // State / sessions
708 //
709
710 // Returns the *actual* size in bytes of the state
711 // (logits, embedding and memory)
712 // Only use when saving the state, not when restoring it, otherwise the size may be too small.
713 LLAMA_API size_t llama_state_get_size(struct llama_context * ctx);
714 LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx),
715 "use llama_state_get_size instead");
716
717 // Copies the state to the specified destination address.
718 // Destination needs to have allocated enough memory.
719 // Returns the number of bytes copied
720 LLAMA_API size_t llama_state_get_data(
721 struct llama_context * ctx,
722 uint8_t * dst,
723 size_t size);
724 LLAMA_API DEPRECATED(size_t llama_copy_state_data(
725 struct llama_context * ctx,
726 uint8_t * dst),
727 "use llama_state_get_data instead");
728
729 // Set the state reading from the specified address
730 // Returns the number of bytes read
731 LLAMA_API size_t llama_state_set_data(
732 struct llama_context * ctx,
733 const uint8_t * src,
734 size_t size);
735 LLAMA_API DEPRECATED(size_t llama_set_state_data(
736 struct llama_context * ctx,
737 const uint8_t * src),
738 "use llama_state_set_data instead");
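
    // Example (a minimal sketch, allocation checks omitted): taking and restoring an
    // in-memory snapshot of the full context state:
    //
    //    const size_t n_state = llama_state_get_size(ctx);
    //    uint8_t * buf = (uint8_t *) malloc(n_state);
    //
    //    const size_t n_written = llama_state_get_data(ctx, buf, n_state);
    //
    //    // ... later, on a context created with the same model and parameters ...
    //    const size_t n_read = llama_state_set_data(ctx, buf, n_written);
    //
    //    free(buf);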
739
740 // Save/load session file
741 LLAMA_API bool llama_state_load_file(
742 struct llama_context * ctx,
743 const char * path_session,
744 llama_token * tokens_out,
745 size_t n_token_capacity,
746 size_t * n_token_count_out);
747 LLAMA_API DEPRECATED(bool llama_load_session_file(
748 struct llama_context * ctx,
749 const char * path_session,
750 llama_token * tokens_out,
751 size_t n_token_capacity,
752 size_t * n_token_count_out),
753 "use llama_state_load_file instead");
754
755 LLAMA_API bool llama_state_save_file(
756 struct llama_context * ctx,
757 const char * path_session,
758 const llama_token * tokens,
759 size_t n_token_count);
760 LLAMA_API DEPRECATED(bool llama_save_session_file(
761 struct llama_context * ctx,
762 const char * path_session,
763 const llama_token * tokens,
764 size_t n_token_count),
765 "use llama_state_save_file instead");
766
767 // Get the exact size needed to copy the state of a single sequence
768 LLAMA_API size_t llama_state_seq_get_size(
769 struct llama_context * ctx,
770 llama_seq_id seq_id);
771
772 // Copy the state of a single sequence into the specified buffer
773 LLAMA_API size_t llama_state_seq_get_data(
774 struct llama_context * ctx,
775 uint8_t * dst,
776 size_t size,
777 llama_seq_id seq_id);
778
779 // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
780 // Returns:
781 // - Positive: Ok
782 // - Zero: Failed to load
783 LLAMA_API size_t llama_state_seq_set_data(
784 struct llama_context * ctx,
785 const uint8_t * src,
786 size_t size,
787 llama_seq_id dest_seq_id);
788
789 LLAMA_API size_t llama_state_seq_save_file(
790 struct llama_context * ctx,
791 const char * filepath,
792 llama_seq_id seq_id,
793 const llama_token * tokens,
794 size_t n_token_count);
795
796 LLAMA_API size_t llama_state_seq_load_file(
797 struct llama_context * ctx,
798 const char * filepath,
799 llama_seq_id dest_seq_id,
800 llama_token * tokens_out,
801 size_t n_token_capacity,
802 size_t * n_token_count_out);
803
804// for backwards-compat
805#define LLAMA_STATE_SEQ_FLAGS_SWA_ONLY 1
806
// works only with partial states, such as the SWA KV cache or recurrent cache (e.g. Mamba)
808#define LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY 1
809
810 typedef uint32_t llama_state_seq_flags;
811
812 LLAMA_API size_t llama_state_seq_get_size_ext(
813 struct llama_context * ctx,
814 llama_seq_id seq_id,
815 llama_state_seq_flags flags);
816
817 LLAMA_API size_t llama_state_seq_get_data_ext(
818 struct llama_context * ctx,
819 uint8_t * dst,
820 size_t size,
821 llama_seq_id seq_id,
822 llama_state_seq_flags flags);
823
824 LLAMA_API size_t llama_state_seq_set_data_ext(
825 struct llama_context * ctx,
826 const uint8_t * src,
827 size_t size,
828 llama_seq_id dest_seq_id,
829 llama_state_seq_flags flags);
830
831 //
832 // Decoding
833 //
834
835 // Return batch for single sequence of tokens
836 // The sequence ID will be fixed to 0
837 // The position of the tokens will be tracked automatically by llama_decode
838 //
839 // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
840 //
841 LLAMA_API struct llama_batch llama_batch_get_one(
842 llama_token * tokens,
843 int32_t n_tokens);
844
845 // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
846 // Each token can be assigned up to n_seq_max sequence ids
847 // The batch has to be freed with llama_batch_free()
848 // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
849 // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
850 // The rest of the llama_batch members are allocated with size n_tokens
851 // All members are left uninitialized
852 LLAMA_API struct llama_batch llama_batch_init(
853 int32_t n_tokens,
854 int32_t embd,
855 int32_t n_seq_max);
856
857 // Frees a batch of tokens allocated with llama_batch_init()
858 LLAMA_API void llama_batch_free(struct llama_batch batch);
859
860 // Process a batch of tokens.
861 // In contrast to llama_decode() - this call does not use KV cache.
    // For encoder-decoder contexts, processes the batch using the encoder.
863 // Can store the encoder output internally for later use by the decoder's cross-attention layers.
864 // 0 - success
865 // < 0 - error. the memory state is restored to the state before this call
866 LLAMA_API int32_t llama_encode(
867 struct llama_context * ctx,
868 struct llama_batch batch);
869
870 // Process a batch of tokens.
871 // Requires the context to have a memory.
    // For encoder-decoder contexts, processes the batch using the decoder.
    // Positive return values do not mean a fatal error, but rather a warning.
    // Upon fatal error or abort, the ubatches that were already processed will remain in the memory state of the context
    // To handle this correctly, query the memory state using llama_memory_seq_pos_min() and llama_memory_seq_pos_max()
    // Upon other return values, the memory state is restored to the state before this call
    //    0 - success
    //    1 - could not find a KV slot for the batch (try reducing the size of the batch or increasing the context size)
    //    2 - aborted (processed ubatches will remain in the context's memory)
    //   -1 - invalid input batch
    // < -1 - fatal error (processed ubatches will remain in the context's memory)
882 LLAMA_API int32_t llama_decode(
883 struct llama_context * ctx,
884 struct llama_batch batch);
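
    // Example (a minimal sketch): handling the return codes listed above:
    //
    //    const int32_t ret = llama_decode(ctx, batch);
    //
    //    if (ret == 0) {
    //        // success - the requested logits/embeddings are now available
    //    } else if (ret == 1) {
    //        // no KV slot - retry with a smaller batch or a larger context
    //    } else if (ret == 2 || ret < -1) {
    //        // aborted or fatal error - some ubatches may remain in the context's memory;
    //        // inspect with llama_memory_seq_pos_min()/llama_memory_seq_pos_max()
    //    } else { // ret == -1
    //        // invalid input batch
    //    }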
885
886 // Set the number of threads used for decoding
887 // n_threads is the number of threads used for generation (single token)
888 // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
889 LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch);
890
891 // Get the number of threads used for generation of a single token.
892 LLAMA_API int32_t llama_n_threads(struct llama_context * ctx);
893
894 // Get the number of threads used for prompt and batch processing (multiple token).
895 LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);
896
897 // Set whether the context outputs embeddings or not
898 // TODO: rename to avoid confusion with llama_get_embeddings()
899 LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
900
901 // Set whether to use causal attention or not
902 // If set to true, the model will only attend to the past tokens
903 LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
904
905 // Set whether the model is in warmup mode or not
906 // If true, all model tensors are activated during llama_decode() to load and cache their weights.
907 LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup);
908
909 // Set abort callback
910 LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data);
911
912 // Wait until all computations are finished
913 // This is automatically done when using one of the functions below to obtain the computation results
914 // and is not necessary to call it explicitly in most cases
915 LLAMA_API void llama_synchronize(struct llama_context * ctx);
916
917 // Token logits obtained from the last call to llama_decode()
918 // The logits for which llama_batch.logits[i] != 0 are stored contiguously
919 // in the order they have appeared in the batch.
920 // Rows: number of tokens for which llama_batch.logits[i] != 0
921 // Cols: n_vocab
922 // TODO: deprecate in favor of llama_get_logits_ith() (ref: https://github.com/ggml-org/llama.cpp/pull/14853#issuecomment-3113143522)
923 LLAMA_API float * llama_get_logits(struct llama_context * ctx);
924
    // Logits for the ith token. For positive indices, equivalent to:
    // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
    // Negative indices can be used to access logits in reverse order, -1 is the last logit.
    // Returns NULL for invalid ids.
929 LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
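
    // Example (a minimal sketch; in practice prefer the Sampling API below): reading the
    // logits of the last output token and picking the arg-max by hand:
    //
    //    const int32_t n_vocab = llama_vocab_n_tokens(llama_model_get_vocab(llama_get_model(ctx)));
    //    const float * logits  = llama_get_logits_ith(ctx, -1); // last output token
    //
    //    llama_token best = 0;
    //    for (llama_token t = 1; t < n_vocab; ++t) {
    //        if (logits[t] > logits[best]) {
    //            best = t;
    //        }
    //    }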
930
931 // Get all output token embeddings.
932 // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
933 // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
934 // in the order they have appeared in the batch.
935 // shape: [n_outputs*n_embd]
936 // Otherwise, returns NULL.
937 // TODO: deprecate in favor of llama_get_embeddings_ith() (ref: https://github.com/ggml-org/llama.cpp/pull/14853#issuecomment-3113143522)
938 LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
939
    // Get the embeddings for the ith token. For positive indices, equivalent to:
    // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
    // Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
    // shape: [n_embd] (1-dimensional)
    // Returns NULL for invalid ids.
945 LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
946
947 // Get the embeddings for a sequence id
948 // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
949 // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[n_cls_out] with the rank(s) of the sequence
950 // otherwise: float[n_embd] (1-dimensional)
951 LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
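
    // Example (a minimal sketch; assumes the context was created with embeddings enabled
    // and a pooling type other than LLAMA_POOLING_TYPE_NONE): retrieving the pooled
    // embedding of sequence 0 after llama_decode():
    //
    //    const int32_t n_embd = llama_model_n_embd(llama_get_model(ctx));
    //    const float * emb    = llama_get_embeddings_seq(ctx, /*seq_id*/ 0);
    //
    //    if (emb != NULL) {
    //        // emb points to n_embd floats (or n_cls_out floats for LLAMA_POOLING_TYPE_RANK)
    //    }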
952
953 //
954 // Vocab
955 //
956
957 LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token);
958
959 LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token);
960
961 LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token);
962
    // Check if the token is supposed to end generation (end-of-generation, e.g. EOS, EOT, etc.)
964 LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token);
965
    // Identify whether the token id is a control token or a renderable token
967 LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token);
968
969 // Special tokens
970 LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence
971 LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence
972 LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn
973 LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator
974 LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line
975 LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding
976 LLAMA_API llama_token llama_vocab_mask(const struct llama_vocab * vocab); // mask
977
978 LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab);
979 LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab);
980 LLAMA_API bool llama_vocab_get_add_sep(const struct llama_vocab * vocab);
981
982 LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab);
983 LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab);
984 LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab);
985 LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab);
986 LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab);
987 LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab);
988
989 DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead");
990 DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead");
991 DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead");
992 DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead");
993 DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead");
994 DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead");
995 DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead");
996 DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead");
997 DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead");
998 DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead");
999 DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead");
1000 DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead");
1001 DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead");
1002 DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead");
1003 DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead");
1004 DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead");
1005 DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead");
1006 DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead");
1007 DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead");
1008 DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead");
1009
1010 // CLS is equivalent to BOS
1011 DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification
1012 "use llama_vocab_bos instead");
1013
1014 //
1015 // Tokenization
1016 //
1017 // The API is thread-safe.
1018 //
1019
1020 /// @details Convert the provided text into tokens.
1021 /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
1022 /// @return Returns the number of tokens on success, no more than n_tokens_max
1023 /// @return Returns a negative number on failure - the number of tokens that would have been returned
1024 /// @return Returns INT32_MIN on overflow (e.g., tokenization result size exceeds int32_t limit)
1025 /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
1026 /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
1027 /// as plaintext. Does not insert a leading space.
1028 LLAMA_API int32_t llama_tokenize(
1029 const struct llama_vocab * vocab,
1030 const char * text,
1031 int32_t text_len,
1032 llama_token * tokens,
1033 int32_t n_tokens_max,
1034 bool add_special,
1035 bool parse_special);
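
    // Example (a minimal sketch): tokenizing into a fixed buffer and retrying when the
    // buffer turns out to be too small:
    //
    //    const struct llama_vocab * vocab = llama_model_get_vocab(model);
    //    const char * text = "Hello world";
    //
    //    llama_token buf[64];
    //    int32_t n = llama_tokenize(vocab, text, (int32_t) strlen(text), buf, 64,
    //                               /*add_special*/ true, /*parse_special*/ false);
    //    if (n < 0) {
    //        // the result needs -n tokens: allocate a larger buffer and tokenize again
    //    }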
1036
1037 // Token Id -> Piece.
1038 // Uses the vocabulary in the provided context.
1039 // Does not write null terminator to the buffer.
1040 // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
1041 // @param special If true, special tokens are rendered in the output.
1042 LLAMA_API int32_t llama_token_to_piece(
1043 const struct llama_vocab * vocab,
1044 llama_token token,
1045 char * buf,
1046 int32_t length,
1047 int32_t lstrip,
1048 bool special);
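
    // Example (a minimal sketch, given a vocab and a sampled token id): converting the
    // token to text while streaming. The piece is not null-terminated, so it is written
    // with an explicit length:
    //
    //    char piece[128];
    //    const int32_t n = llama_token_to_piece(vocab, id, piece, (int32_t) sizeof(piece),
    //                                           /*lstrip*/ 0, /*special*/ false);
    //    if (n >= 0) {
    //        fwrite(piece, 1, (size_t) n, stdout);
    //    }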
1049
1050 /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
1051 /// @param text The char pointer must be large enough to hold the resulting text.
1052 /// @return Returns the number of chars/bytes on success, no more than text_len_max.
1053 /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
1054 /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so.
1055 /// @param unparse_special If true, special tokens are rendered in the output.
1056 LLAMA_API int32_t llama_detokenize(
1057 const struct llama_vocab * vocab,
1058 const llama_token * tokens,
1059 int32_t n_tokens,
1060 char * text,
1061 int32_t text_len_max,
1062 bool remove_special,
1063 bool unparse_special);
1064
1065 //
1066 // Chat templates
1067 //
1068
1069 /// Apply chat template. Inspired by hf apply_chat_template() on python.
1070 /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
    /// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
1072 /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
1073 /// @param chat Pointer to a list of multiple llama_chat_message
1074 /// @param n_msg Number of llama_chat_message in this chat
1075 /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
1076 /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
1077 /// @param length The size of the allocated buffer
    /// @return The total number of bytes of the formatted prompt. If it is larger than the size of the buffer, you may need to re-alloc it and then re-apply the template.
1079 LLAMA_API int32_t llama_chat_apply_template(
1080 const char * tmpl,
1081 const struct llama_chat_message * chat,
1082 size_t n_msg,
1083 bool add_ass,
1084 char * buf,
1085 int32_t length);
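
    // Example (a minimal sketch): formatting a short chat with the model's own template
    // and growing the buffer when the formatted prompt does not fit:
    //
    //    llama_chat_message msgs[2] = {
    //        { "system", "You are a helpful assistant." },
    //        { "user",   "Hello!"                       },
    //    };
    //
    //    const char * tmpl = llama_model_chat_template(model, /*name*/ NULL);
    //
    //    char buf[1024];
    //    int32_t n = llama_chat_apply_template(tmpl, msgs, 2, /*add_ass*/ true, buf, (int32_t) sizeof(buf));
    //    if (n > (int32_t) sizeof(buf)) {
    //        // allocate a buffer of n bytes and apply the template again
    //    }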
1086
1087 // Get list of built-in chat templates
1088 LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len);
1089
1090 //
1091 // Sampling API
1092 //
1093 // Sample usage:
1094 //
1095 // // prepare the sampling chain at the start
1096 // auto sparams = llama_sampler_chain_default_params();
1097 //
1098 // llama_sampler * smpl = llama_sampler_chain_init(sparams);
1099 //
1100 // llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50));
1101 // llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1));
1102 // llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8));
1103 //
1104 // // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat"
1105 // // this sampler will be responsible to select the actual token
1106 // llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed));
1107 //
1108 // ...
1109 //
1110 // // decoding loop:
1111 // while (...) {
1112 // ...
1113 //
1114 // llama_decode(ctx, batch);
1115 //
1116 // // sample from the logits of the last token in the batch
1117 // const llama_token id = llama_sampler_sample(smpl, ctx, -1);
1118 //
1119 // ...
1120 // }
1121 //
1122 // llama_sampler_free(smpl);
1123 //
1124 // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU).
1125 //
1126
1127 typedef void * llama_sampler_context_t;
1128
1129 // user code can implement the interface below in order to create custom llama_sampler
1130 struct llama_sampler_i {
1131 const char * (*name) (const struct llama_sampler * smpl); // can be NULL
1132 void (*accept)( struct llama_sampler * smpl, llama_token token); // can be NULL
1133 void (*apply) ( struct llama_sampler * smpl, llama_token_data_array * cur_p); // required
1134 void (*reset) ( struct llama_sampler * smpl); // can be NULL
1135 struct llama_sampler * (*clone) (const struct llama_sampler * smpl); // can be NULL if ctx is NULL
1136 void (*free) ( struct llama_sampler * smpl); // can be NULL if ctx is NULL
1137
1138 // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph
1139 //void (*apply_ggml) (struct llama_sampler * smpl, ...);
1140 };
1141
1142 struct llama_sampler {
1143 const struct llama_sampler_i * iface;
1144 llama_sampler_context_t ctx;
1145 };
1146
1147 // mirror of llama_sampler_i:
1148 LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx);
1149 LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl);
1150 LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token);
1151 LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p);
1152 LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl);
1153 LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl);
1154 // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add)
1155 LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl);
1156
1157 // llama_sampler_chain
1158 // a type of llama_sampler that can chain multiple samplers one after another
1159
1160 LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params);
1161
1162 // important: takes ownership of the sampler object and will free it when llama_sampler_free is called
1163 LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl);
1164 LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i);
1165 LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain);
1166
1167 // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed
1168 LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i);
1169
1170 // available samplers:
1171
1172 LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void);
1173 LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed);
1174
1175 /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1176 /// Setting k <= 0 makes this a noop
1177 LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k);
1178
1179 /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
1180 LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep);
1181
1182 /// @details Minimum P sampling as described in https://github.com/ggml-org/llama.cpp/pull/3841
1183 LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep);
1184
1185 /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
1186 LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep);
1187
    /// @details Updates the logits l_i' = l_i/t. When t <= 0.0f, the maximum logit is kept at its original value, the rest are set to -inf
1189 LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t);

    /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.
    LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent);

    /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335
    LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed);

    /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641
    LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n);

    /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API struct llama_sampler * llama_sampler_init_mirostat(
             int32_t n_vocab,
            uint32_t seed,
               float tau,
               float eta,
             int32_t m);
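
    // Example (sketch): a Mirostat 1.0 setup with the values commonly used as defaults
    // (tau = 5.0, eta = 0.1, m = 100 - illustrative, not prescriptive). Assumes
    // llama_vocab_n_tokens() and llama_sampler_chain_add() as declared earlier in this header.
    //
    //    llama_sampler_chain_add(chain,
    //        llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), LLAMA_DEFAULT_SEED,
    //                                    /*tau =*/ 5.0f, /*eta =*/ 0.1f, /*m =*/ 100));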

    /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2(
            uint32_t seed,
               float tau,
               float eta);

    /// @details Initializes a GBNF grammar, see grammars/README.md for details.
    /// @param vocab The vocabulary that this grammar will be used with.
    /// @param grammar_str The production rules for the grammar, encoded as a string. If empty, an empty grammar is returned. If parsing of grammar_str fails, NULL is returned.
    /// @param grammar_root The name of the start symbol for the grammar.
    LLAMA_API struct llama_sampler * llama_sampler_init_grammar(
            const struct llama_vocab * vocab,
                          const char * grammar_str,
                          const char * grammar_root);
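
    // Example (sketch): constraining generation to a tiny GBNF grammar; the grammar string is
    // illustrative only - see grammars/README.md for the syntax.
    //
    //    struct llama_sampler * grammar = llama_sampler_init_grammar(vocab, "root ::= \"yes\" | \"no\"", "root");
    //    if (grammar == NULL) {
    //        // parsing of the grammar string failed
    //    }
    //    llama_sampler_chain_add(chain, grammar);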

    DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy(
            const struct llama_vocab * vocab,
                          const char * grammar_str,
                          const char * grammar_root,
                         const char ** trigger_words,
                                size_t num_trigger_words,
                   const llama_token * trigger_tokens,
                                size_t num_trigger_tokens),
        "use llama_sampler_init_grammar_lazy_patterns instead");


    /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639
    /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. Each pattern is matched against the generation output from its start, and the grammar sampler is fed content starting from the pattern's first match group.
    /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. The grammar sampler is fed content starting from (and including) the trigger token.
    LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns(
            const struct llama_vocab * vocab,
                          const char * grammar_str,
                          const char * grammar_root,
                         const char ** trigger_patterns,
                                size_t num_trigger_patterns,
                   const llama_token * trigger_tokens,
                                size_t num_trigger_tokens);
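
    // Example (sketch): start constraining only once the model emits a "<tool_call>" marker.
    // The pattern and marker are illustrative; note the capture group, which determines where
    // the grammar sampler starts consuming content.
    //
    //    const char * patterns[] = { "<tool_call>([\\s\\S]*)" };
    //    struct llama_sampler * lazy = llama_sampler_init_grammar_lazy_patterns(
    //        vocab, grammar_str, "root",
    //        patterns, 1,
    //        /*trigger_tokens =*/ NULL, /*num_trigger_tokens =*/ 0);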

    /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first.
    LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
            int32_t penalty_last_n,   // last n tokens to penalize (0 = disable penalty, -1 = context size)
            float   penalty_repeat,   // 1.0 = disabled
            float   penalty_freq,     // 0.0 = disabled
            float   penalty_present); // 0.0 = disabled
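
    // Example (sketch): per the note above, narrow the candidate set before applying penalties.
    // The values are illustrative, not recommendations.
    //
    //    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
    //    llama_sampler_chain_add(chain, llama_sampler_init_penalties(
    //        /*penalty_last_n  =*/ 64,
    //        /*penalty_repeat  =*/ 1.1f,
    //        /*penalty_freq    =*/ 0.0f,
    //        /*penalty_present =*/ 0.0f));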

    /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982
    LLAMA_API struct llama_sampler * llama_sampler_init_dry(
            const struct llama_vocab * vocab,
                             int32_t   n_ctx_train,
                               float   dry_multiplier,
                               float   dry_base,
                             int32_t   dry_allowed_length,
                             int32_t   dry_penalty_last_n,
                        const char **  seq_breakers,
                              size_t   num_breakers);
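
    // Example (sketch): a DRY setup with values often seen in downstream UIs (illustrative, not
    // recommendations). Assumes llama_model_n_ctx_train() as declared earlier in this header.
    //
    //    const char * seq_breakers[] = { "\n", ":", "\"", "*" };
    //    llama_sampler_chain_add(chain, llama_sampler_init_dry(
    //        vocab, llama_model_n_ctx_train(model),
    //        /*dry_multiplier     =*/ 0.8f,
    //        /*dry_base           =*/ 1.75f,
    //        /*dry_allowed_length =*/ 2,
    //        /*dry_penalty_last_n =*/ -1,
    //        seq_breakers, 4));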

    LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias(
                           int32_t   n_vocab,
                           int32_t   n_logit_bias,
            const llama_logit_bias * logit_bias);
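
    // Example (sketch): forbid one token and favor another. Assumes the llama_logit_bias struct
    // ({ token, bias }) and llama_vocab_n_tokens() as declared earlier in this header;
    // 'banned_tok' and 'favored_tok' are placeholders for token ids obtained elsewhere.
    //
    //    llama_logit_bias biases[2] = {
    //        { /*token =*/ banned_tok,  /*bias =*/ -INFINITY }, // never sample this token
    //        { /*token =*/ favored_tok, /*bias =*/  2.0f     }, // boost this token's logit
    //    };
    //    llama_sampler_chain_add(chain,
    //        llama_sampler_init_logit_bias(llama_vocab_n_tokens(vocab), 2, biases));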

    // this sampler is meant to be used for fill-in-the-middle infilling
    // it's supposed to be used after top_k + top_p sampling
    //
    // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG
    // 2. combine probs of tokens that have the same prefix
    //
    // example:
    //
    // - before:
    //   "hel":   0.5
    //   "hell":  0.2
    //   "hello": 0.1
    //   "dummy": 0.1
    //
    // - after:
    //   "hel":   0.8
    //   "dummy": 0.1
    //
    // 3. discard non-EOG tokens with low prob
    // 4. if no tokens are left -> pick EOT
    //
    LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab);
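
    // Example (sketch): the chain position suggested by the comment above.
    //
    //    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
    //    llama_sampler_chain_add(chain, llama_sampler_init_top_p(0.95f, 1));
    //    llama_sampler_chain_add(chain, llama_sampler_init_infill(vocab));
    //    llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));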

    // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise
    LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl);

    /// @details Sample and accept a token from the idx-th output of the last evaluation
    //
    // Shorthand for:
    //
    //    const auto * logits = llama_get_logits_ith(ctx, idx);
    //    llama_token_data_array cur_p = { ... init from logits ... };
    //    llama_sampler_apply(smpl, &cur_p);
    //    auto token = cur_p.data[cur_p.selected].id;
    //    llama_sampler_accept(smpl, token);
    //    return token;
    //
    // Returns the sampled token
    LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx);
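
    // Example (sketch): a minimal generation loop built on llama_sampler_sample(). Assumes
    // llama_decode(), llama_batch_get_one() and llama_vocab_is_eog() as declared earlier in this
    // header, and that idx == -1 addresses the last output (as with llama_get_logits_ith());
    // error handling is omitted.
    //
    //    llama_token tok = llama_sampler_sample(smpl, ctx, -1);
    //    while (!llama_vocab_is_eog(vocab, tok)) {
    //        // ... convert 'tok' to text and emit it ...
    //        if (llama_decode(ctx, llama_batch_get_one(&tok, 1)) != 0) {
    //            break;
    //        }
    //        tok = llama_sampler_sample(smpl, ctx, -1);
    //    }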

    // TODO: extend in the future
    //LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);

    //
    // Model split
    //

    /// @details Build a split GGUF final path for this chunk.
    /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
    // Returns the split_path length.
    LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);

    /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
    /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
    // Returns the split_prefix length.
    LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
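
    // Example (sketch): enumerating every shard of a 4-way split into a fixed-size buffer.
    //
    //    char split_path[512];
    //    for (int i = 0; i < 4; ++i) {
    //        llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", i, 4);
    //        // produces paths of the form shown in the example above
    //    }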

    // Print system information
    LLAMA_API const char * llama_print_system_info(void);

    // Set callback for all future logging events.
    // If this is not called, or NULL is supplied, everything is output on stderr.
    LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
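
    // Example (sketch): redirecting logs to a callback. Assumes the ggml_log_callback signature
    // from ggml.h, i.e. (enum ggml_log_level level, const char * text, void * user_data);
    // 'my_log_cb' is a placeholder name.
    //
    //    static void my_log_cb(enum ggml_log_level level, const char * text, void * user_data) {
    //        (void) level;
    //        fputs(text, (FILE *) user_data);
    //    }
    //
    //    llama_log_set(my_log_cb, stderr);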

    //
    // Performance utils
    //
    // NOTE: Used by llama.cpp examples/tools, avoid using in third-party apps. Instead, do your own performance measurements.
    //

    struct llama_perf_context_data {
        // ms == milliseconds
        double t_start_ms;  // absolute start time
        double t_load_ms;   // time needed for loading the model
        double t_p_eval_ms; // time needed for processing the prompt
        double t_eval_ms;   // time needed for generating tokens

        int32_t n_p_eval; // number of prompt tokens
        int32_t n_eval;   // number of generated tokens
        int32_t n_reused; // number of times a ggml compute graph has been reused
    };

    struct llama_perf_sampler_data {
        double t_sample_ms; // time needed for sampling in ms

        int32_t n_sample; // number of sampled tokens
    };

    LLAMA_API struct llama_perf_context_data llama_perf_context      (const struct llama_context * ctx);
    LLAMA_API void                           llama_perf_context_print(const struct llama_context * ctx);
    LLAMA_API void                           llama_perf_context_reset(      struct llama_context * ctx);

    // NOTE: the following work only with samplers constructed via llama_sampler_chain_init
    LLAMA_API struct llama_perf_sampler_data llama_perf_sampler      (const struct llama_sampler * chain);
    LLAMA_API void                           llama_perf_sampler_print(const struct llama_sampler * chain);
    LLAMA_API void                           llama_perf_sampler_reset(      struct llama_sampler * chain);
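
    // Example (sketch): deriving throughput from the counters above (guard against division by
    // zero in real code).
    //
    //    const struct llama_perf_context_data pd = llama_perf_context(ctx);
    //    const double prompt_tps = 1e3 * pd.n_p_eval / pd.t_p_eval_ms; // prompt tokens per second
    //    const double gen_tps    = 1e3 * pd.n_eval   / pd.t_eval_ms;   // generated tokens per second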

    // print a breakdown of per-device memory use via LLAMA_LOG:
    LLAMA_API void llama_memory_breakdown_print(const struct llama_context * ctx);

    //
    // training
    //

    // function that returns whether or not a given tensor contains trainable parameters
    typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);

    // always returns true
    LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);

    struct llama_opt_params {
        uint32_t n_ctx_train; // assumed context size after training; if 0, the context size specified in llama_context is used

        llama_opt_param_filter param_filter; // callback for determining which tensors contain trainable parameters
        void * param_filter_ud;              // userdata for determining which tensors contain trainable parameters

        ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
        void * get_opt_pars_ud;                     // userdata for calculating optimizer parameters

        enum ggml_opt_optimizer_type optimizer_type;
    };

    LLAMA_API void llama_opt_init(struct llama_context * lctx, struct llama_model * model, struct llama_opt_params lopt_params);

    LLAMA_API void llama_opt_epoch(
            struct llama_context  * lctx,
            ggml_opt_dataset_t      dataset,
            ggml_opt_result_t       result_train,
            ggml_opt_result_t       result_eval,
            int64_t                 idata_split,
            ggml_opt_epoch_callback callback_train,
            ggml_opt_epoch_callback callback_eval);
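
    // Example (sketch): enabling training of all parameters with default optimizer settings.
    // Assumes ggml_opt_get_default_optimizer_params() and GGML_OPT_OPTIMIZER_TYPE_ADAMW from
    // ggml-opt.h; constructing the dataset and result objects for llama_opt_epoch() is omitted.
    //
    //    struct llama_opt_params opt_params = {
    //        /*n_ctx_train     =*/ 0, // use the context size of lctx
    //        /*param_filter    =*/ llama_opt_param_filter_all,
    //        /*param_filter_ud =*/ NULL,
    //        /*get_opt_pars    =*/ ggml_opt_get_default_optimizer_params,
    //        /*get_opt_pars_ud =*/ NULL,
    //        /*optimizer_type  =*/ GGML_OPT_OPTIMIZER_TYPE_ADAMW,
    //    };
    //    llama_opt_init(lctx, model, opt_params);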

#ifdef __cplusplus
}
#endif

#endif // LLAMA_H
