#include <algorithm>
#include <array>
#include <cassert>
#include <chrono>
#include <cinttypes>
#include <clocale>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iterator>
#include <map>
#include <numeric>
#include <regex>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include <unordered_set>

#include "common.h"
#include "ggml.h"
#include "llama.h"

#ifdef _WIN32
#    define WIN32_LEAN_AND_MEAN
#    ifndef NOMINMAX
#        define NOMINMAX
#    endif
#    include <windows.h>
#endif
// utils
static uint64_t get_time_ns() {
    using clock = std::chrono::high_resolution_clock;
    return std::chrono::nanoseconds(clock::now().time_since_epoch()).count();
}

static bool tensor_buft_override_equal(const llama_model_tensor_buft_override& a, const llama_model_tensor_buft_override& b) {
    if (a.pattern != b.pattern) {
        // C-string comparison; either pattern may be null
        if (a.pattern == nullptr || b.pattern == nullptr) {
            return false;
        }
        if (strcmp(a.pattern, b.pattern) != 0) {
            return false;
        }
    }
    if (a.buft != b.buft) {
        return false;
    }
    return true;
}

static bool vec_tensor_buft_override_equal(const std::vector<llama_model_tensor_buft_override>& a, const std::vector<llama_model_tensor_buft_override>& b) {
    if (a.size() != b.size()) {
        return false;
    }
    for (size_t i = 0; i < a.size(); i++) {
        if (!tensor_buft_override_equal(a[i], b[i])) {
            return false;
        }
    }
    return true;
}

static bool vec_vec_tensor_buft_override_equal(const std::vector<std::vector<llama_model_tensor_buft_override>>& a, const std::vector<std::vector<llama_model_tensor_buft_override>>& b) {
    if (a.size() != b.size()) {
        return false;
    }
    for (size_t i = 0; i < a.size(); i++) {
        if (!vec_tensor_buft_override_equal(a[i], b[i])) {
            return false;
        }
    }
    return true;
}

template <class T> static std::string join(const std::vector<T> & values, const std::string & delim) {
    std::ostringstream str;
    for (size_t i = 0; i < values.size(); i++) {
        str << values[i];
        if (i < values.size() - 1) {
            str << delim;
        }
    }
    return str.str();
}

template <typename T, typename F> static std::vector<std::string> transform_to_str(const std::vector<T> & values, F f) {
    std::vector<std::string> str_values;
    std::transform(values.begin(), values.end(), std::back_inserter(str_values), f);
    return str_values;
}

template <typename T> static T avg(const std::vector<T> & v) {
    if (v.empty()) {
        return 0;
    }
    T sum = std::accumulate(v.begin(), v.end(), T(0));
    return sum / (T) v.size();
}

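// corrected (n-1) sample standard deviation, computed via the sum-of-squares
// identity: s^2 = (sum(x_i^2) - n * mean^2) / (n - 1)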
template <typename T> static T stdev(const std::vector<T> & v) {
    if (v.size() <= 1) {
        return 0;
    }
    T mean   = avg(v);
    T sq_sum = std::inner_product(v.begin(), v.end(), v.begin(), T(0));
    T stdev  = std::sqrt(sq_sum / (T) (v.size() - 1) - mean * mean * (T) v.size() / (T) (v.size() - 1));
    return stdev;
}

static std::string get_cpu_info() {
    std::vector<std::string> cpu_list;
    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        auto * dev      = ggml_backend_dev_get(i);
        auto   dev_type = ggml_backend_dev_type(dev);
        if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU || dev_type == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
            cpu_list.push_back(ggml_backend_dev_description(dev));
        }
    }
    return join(cpu_list, ", ");
}

static std::string get_gpu_info() {
    std::vector<std::string> gpu_list;
    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        auto * dev      = ggml_backend_dev_get(i);
        auto   dev_type = ggml_backend_dev_type(dev);
        if (dev_type == GGML_BACKEND_DEVICE_TYPE_GPU || dev_type == GGML_BACKEND_DEVICE_TYPE_IGPU) {
            gpu_list.push_back(ggml_backend_dev_description(dev));
        }
    }
    return join(gpu_list, ", ");
}

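// accepted forms for --device: "auto" (empty result, backend decides), "none"
// (a single nullptr entry), or a '/'-separated list of device names; explicit
// lists are terminated with a nullptr sentinel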
static std::vector<ggml_backend_dev_t> parse_devices_arg(const std::string & value) {
    std::vector<ggml_backend_dev_t> devices;
    std::string trimmed = string_strip(value);
    if (trimmed.empty()) {
        throw std::invalid_argument("no devices specified");
    }
    if (trimmed == "auto") {
        return devices;
    }

    auto dev_names = string_split<std::string>(trimmed, '/');
    if (dev_names.size() == 1 && string_strip(dev_names[0]) == "none") {
        devices.push_back(nullptr);
        return devices;
    }

    for (auto & name : dev_names) {
        std::string dev_name = string_strip(name);
        if (dev_name.empty()) {
            throw std::invalid_argument("invalid device specification");
        }
        auto * dev = ggml_backend_dev_by_name(dev_name.c_str());
        if (!dev || ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
            throw std::invalid_argument(string_format("invalid device: %s", dev_name.c_str()));
        }
        devices.push_back(dev);
    }

    devices.push_back(nullptr);
    return devices;
}

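// the RPC backend is optional, so the add-server entry point is resolved at
// runtime through ggml_backend_reg_get_proc_address rather than linked directly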
static void register_rpc_server_list(const std::string & servers) {
    auto rpc_servers = string_split<std::string>(servers, ',');
    if (rpc_servers.empty()) {
        throw std::invalid_argument("no RPC servers specified");
    }

    auto * rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        throw std::invalid_argument("failed to find RPC backend");
    }

    using add_rpc_server_fn = ggml_backend_reg_t (*)(const char * endpoint);
    auto * ggml_backend_rpc_add_server_fn = (add_rpc_server_fn) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server");
    if (!ggml_backend_rpc_add_server_fn) {
        throw std::invalid_argument("failed to find RPC add server function");
    }
    for (const auto & server : rpc_servers) {
        auto reg = ggml_backend_rpc_add_server_fn(server.c_str());
        ggml_backend_register(reg);
    }
}

static std::string devices_to_string(const std::vector<ggml_backend_dev_t> & devices) {
    if (devices.empty()) {
        return "auto";
    }

    if (devices.size() == 1 && devices[0] == nullptr) {
        return "none";
    }

    std::vector<std::string> names;
    for (auto * dev : devices) {
        if (dev == nullptr) {
            break;
        }
        names.push_back(ggml_backend_dev_name(dev));
    }

    return join(names, "/");
}

// command line params
enum output_formats { NONE, CSV, JSON, JSONL, MARKDOWN, SQL };

static const char * output_format_str(output_formats format) {
    switch (format) {
        case NONE:
            return "none";
        case CSV:
            return "csv";
        case JSON:
            return "json";
        case JSONL:
            return "jsonl";
        case MARKDOWN:
            return "md";
        case SQL:
            return "sql";
        default:
            GGML_ABORT("invalid output format");
    }
}

static bool output_format_from_str(const std::string & s, output_formats & format) {
    if (s == "none") {
        format = NONE;
    } else if (s == "csv") {
        format = CSV;
    } else if (s == "json") {
        format = JSON;
    } else if (s == "jsonl") {
        format = JSONL;
    } else if (s == "md") {
        format = MARKDOWN;
    } else if (s == "sql") {
        format = SQL;
    } else {
        return false;
    }
    return true;
}

static const char * split_mode_str(llama_split_mode mode) {
    switch (mode) {
        case LLAMA_SPLIT_MODE_NONE:
            return "none";
        case LLAMA_SPLIT_MODE_LAYER:
            return "layer";
        case LLAMA_SPLIT_MODE_ROW:
            return "row";
        default:
            GGML_ABORT("invalid split mode");
    }
}

static std::string pair_str(const std::pair<int, int> & p) {
    static char buf[32];
    snprintf(buf, sizeof(buf), "%d,%d", p.first, p.second);
    return buf;
}

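// examples, derived from the grammar below:
//   "128"         -> 128
//   "128-512+128" -> 128, 256, 384, 512
//   "16-256*2"    -> 16, 32, 64, 128, 256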
static std::vector<int> parse_int_range(const std::string & s) {
    // first[-last[(+|*)step]]
    std::regex range_regex(R"(^(\d+)(?:-(\d+)(?:([+*])(\d+))?)?(?:,|$))");

    std::smatch match;
    std::string::const_iterator search_start(s.cbegin());
    std::vector<int> result;
    while (std::regex_search(search_start, s.cend(), match, range_regex)) {
        int  first = std::stoi(match[1]);
        int  last  = match[2].matched ? std::stoi(match[2]) : first;
        char op    = match[3].matched ? match[3].str()[0] : '+';
        int  step  = match[4].matched ? std::stoi(match[4]) : 1;

        for (int i = first; i <= last;) {
            result.push_back(i);

            int prev_i = i;

            if (op == '+') {
                i += step;
            } else if (op == '*') {
                i *= step;
            } else {
                throw std::invalid_argument("invalid range format");
            }

            if (i <= prev_i) {
                throw std::invalid_argument("invalid range");
            }
        }
        search_start = match.suffix().first;
    }

    if (search_start != s.cend()) {
        throw std::invalid_argument("invalid range format");
    }

    return result;
}

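// each vector field holds the set of values to benchmark for that parameter;
// get_cmd_params_instances() expands them into their cross product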
struct cmd_params {
    std::vector<std::string> model;
    std::vector<int> n_prompt;
    std::vector<int> n_gen;
    std::vector<std::pair<int, int>> n_pg;
    std::vector<int> n_depth;
    std::vector<int> n_batch;
    std::vector<int> n_ubatch;
    std::vector<ggml_type> type_k;
    std::vector<ggml_type> type_v;
    std::vector<int> n_threads;
    std::vector<std::string> cpu_mask;
    std::vector<bool> cpu_strict;
    std::vector<int> poll;
    std::vector<int> n_gpu_layers;
    std::vector<int> n_cpu_moe;
    std::vector<llama_split_mode> split_mode;
    std::vector<int> main_gpu;
    std::vector<bool> no_kv_offload;
    std::vector<bool> flash_attn;
    std::vector<std::vector<ggml_backend_dev_t>> devices;
    std::vector<std::vector<float>> tensor_split;
    std::vector<std::vector<llama_model_tensor_buft_override>> tensor_buft_overrides;
    std::vector<bool> use_mmap;
    std::vector<bool> embeddings;
    std::vector<bool> no_op_offload;
    std::vector<bool> no_host;
    ggml_numa_strategy numa;
    int reps;
    ggml_sched_priority prio;
    int delay;
    bool verbose;
    bool progress;
    bool no_warmup;
    output_formats output_format;
    output_formats output_format_stderr;
};

static const cmd_params cmd_params_defaults = {
    /* model                */ { "models/7B/ggml-model-q4_0.gguf" },
    /* n_prompt             */ { 512 },
    /* n_gen                */ { 128 },
    /* n_pg                 */ {},
    /* n_depth              */ { 0 },
    /* n_batch              */ { 2048 },
    /* n_ubatch             */ { 512 },
    /* type_k               */ { GGML_TYPE_F16 },
    /* type_v               */ { GGML_TYPE_F16 },
    /* n_threads            */ { cpu_get_num_math() },
    /* cpu_mask             */ { "0x0" },
    /* cpu_strict           */ { false },
    /* poll                 */ { 50 },
    /* n_gpu_layers         */ { 99 },
    /* n_cpu_moe            */ { 0 },
    /* split_mode           */ { LLAMA_SPLIT_MODE_LAYER },
    /* main_gpu             */ { 0 },
    /* no_kv_offload        */ { false },
    /* flash_attn           */ { false },
    /* devices              */ { {} },
    /* tensor_split         */ { std::vector<float>(llama_max_devices(), 0.0f) },
    /* tensor_buft_overrides*/ { std::vector<llama_model_tensor_buft_override>{ { nullptr, nullptr } } },
    /* use_mmap             */ { true },
    /* embeddings           */ { false },
    /* no_op_offload        */ { false },
    /* no_host              */ { false },
    /* numa                 */ GGML_NUMA_STRATEGY_DISABLED,
    /* reps                 */ 5,
    /* prio                 */ GGML_SCHED_PRIO_NORMAL,
    /* delay                */ 0,
    /* verbose              */ false,
    /* progress             */ false,
    /* no_warmup            */ false,
    /* output_format        */ MARKDOWN,
    /* output_format_stderr */ NONE,
};

static void print_usage(int /* argc */, char ** argv) {
    printf("usage: %s [options]\n", argv[0]);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help\n");
    printf("  --numa <distribute|isolate|numactl>       numa mode (default: disabled)\n");
    printf("  -r, --repetitions <n>                     number of times to repeat each test (default: %d)\n",
           cmd_params_defaults.reps);
    printf("  --prio <-1|0|1|2|3>                       process/thread priority (default: %d)\n",
           cmd_params_defaults.prio);
    printf("  --delay <0...N> (seconds)                 delay between each test (default: %d)\n",
           cmd_params_defaults.delay);
    printf("  -o, --output <csv|json|jsonl|md|sql>      output format printed to stdout (default: %s)\n",
           output_format_str(cmd_params_defaults.output_format));
    printf("  -oe, --output-err <csv|json|jsonl|md|sql> output format printed to stderr (default: %s)\n",
           output_format_str(cmd_params_defaults.output_format_stderr));
    printf("  --list-devices                            list available devices and exit\n");
    printf("  -v, --verbose                             verbose output\n");
    printf("  --progress                                print test progress indicators\n");
    printf("  --no-warmup                               skip warmup runs before benchmarking\n");
    if (llama_supports_rpc()) {
        printf("  -rpc, --rpc <rpc_servers>                 register RPC devices (comma separated)\n");
    }
    printf("\n");
    printf("test parameters:\n");
    printf("  -m, --model <filename>                    (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
    printf("  -p, --n-prompt <n>                        (default: %s)\n",
           join(cmd_params_defaults.n_prompt, ",").c_str());
    printf("  -n, --n-gen <n>                           (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
    printf("  -pg <pp,tg>                               (default: %s)\n",
           join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
    printf("  -d, --n-depth <n>                         (default: %s)\n",
           join(cmd_params_defaults.n_depth, ",").c_str());
    printf("  -b, --batch-size <n>                      (default: %s)\n",
           join(cmd_params_defaults.n_batch, ",").c_str());
    printf("  -ub, --ubatch-size <n>                    (default: %s)\n",
           join(cmd_params_defaults.n_ubatch, ",").c_str());
    printf("  -ctk, --cache-type-k <t>                  (default: %s)\n",
           join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
    printf("  -ctv, --cache-type-v <t>                  (default: %s)\n",
           join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
    printf("  -t, --threads <n>                         (default: %s)\n",
           join(cmd_params_defaults.n_threads, ",").c_str());
    printf("  -C, --cpu-mask <hex,hex>                  (default: %s)\n",
           join(cmd_params_defaults.cpu_mask, ",").c_str());
    printf("  --cpu-strict <0|1>                        (default: %s)\n",
           join(cmd_params_defaults.cpu_strict, ",").c_str());
    printf("  --poll <0...100>                          (default: %s)\n", join(cmd_params_defaults.poll, ",").c_str());
    printf("  -ngl, --n-gpu-layers <n>                  (default: %s)\n",
           join(cmd_params_defaults.n_gpu_layers, ",").c_str());
    printf("  -ncmoe, --n-cpu-moe <n>                   (default: %s)\n",
           join(cmd_params_defaults.n_cpu_moe, ",").c_str());
    printf("  -sm, --split-mode <none|layer|row>        (default: %s)\n",
           join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
    printf("  -mg, --main-gpu <i>                       (default: %s)\n",
           join(cmd_params_defaults.main_gpu, ",").c_str());
    printf("  -nkvo, --no-kv-offload <0|1>              (default: %s)\n",
           join(cmd_params_defaults.no_kv_offload, ",").c_str());
    printf("  -fa, --flash-attn <0|1>                   (default: %s)\n",
           join(cmd_params_defaults.flash_attn, ",").c_str());
    printf("  -dev, --device <dev0/dev1/...>            (default: auto)\n");
    printf("  -mmp, --mmap <0|1>                        (default: %s)\n",
           join(cmd_params_defaults.use_mmap, ",").c_str());
    printf("  -embd, --embeddings <0|1>                 (default: %s)\n",
           join(cmd_params_defaults.embeddings, ",").c_str());
    printf("  -ts, --tensor-split <ts0/ts1/..>          (default: 0)\n");
    printf("  -ot --override-tensor <tensor name pattern>=<buffer type>;...\n");
    printf("                                            (default: disabled)\n");
    printf("  -nopo, --no-op-offload <0|1>              (default: 0)\n");
    printf("  --no-host <0|1>                           (default: %s)\n",
           join(cmd_params_defaults.no_host, ",").c_str());
    printf("\n");
    printf(
        "Multiple values can be given for each parameter by separating them with ','\n"
        "or by specifying the parameter multiple times. Ranges can be given as\n"
        "'first-last' or 'first-last+step' or 'first-last*mult'.\n");
}

static ggml_type ggml_type_from_name(const std::string & s) {
    if (s == "f16") {
        return GGML_TYPE_F16;
    }
    if (s == "bf16") {
        return GGML_TYPE_BF16;
    }
    if (s == "q8_0") {
        return GGML_TYPE_Q8_0;
    }
    if (s == "q4_0") {
        return GGML_TYPE_Q4_0;
    }
    if (s == "q4_1") {
        return GGML_TYPE_Q4_1;
    }
    if (s == "q5_0") {
        return GGML_TYPE_Q5_0;
    }
    if (s == "q5_1") {
        return GGML_TYPE_Q5_1;
    }
    if (s == "iq4_nl") {
        return GGML_TYPE_IQ4_NL;
    }

    return GGML_TYPE_COUNT;
}

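// parsing conventions: long options accept '_' as an alias for '-', list
// arguments append to (rather than replace) earlier occurrences, and any
// parse failure prints the usage text and exits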
static cmd_params parse_cmd_params(int argc, char ** argv) {
    cmd_params params;
    std::string arg;
    bool invalid_param = false;
    const std::string arg_prefix = "--";
    const char split_delim = ',';

    params.verbose = cmd_params_defaults.verbose;
    params.output_format = cmd_params_defaults.output_format;
    params.output_format_stderr = cmd_params_defaults.output_format_stderr;
    params.reps = cmd_params_defaults.reps;
    params.numa = cmd_params_defaults.numa;
    params.prio = cmd_params_defaults.prio;
    params.delay = cmd_params_defaults.delay;
    params.progress = cmd_params_defaults.progress;
    params.no_warmup = cmd_params_defaults.no_warmup;

    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }

        try {
            if (arg == "-h" || arg == "--help") {
                print_usage(argc, argv);
                exit(0);
            } else if (arg == "-m" || arg == "--model") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<std::string>(argv[i], split_delim);
                params.model.insert(params.model.end(), p.begin(), p.end());
            } else if (arg == "-p" || arg == "--n-prompt") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_prompt.insert(params.n_prompt.end(), p.begin(), p.end());
            } else if (arg == "-n" || arg == "--n-gen") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
            } else if (arg == "-pg") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<std::string>(argv[i], ',');
                if (p.size() != 2) {
                    invalid_param = true;
                    break;
                }
                params.n_pg.push_back({ std::stoi(p[0]), std::stoi(p[1]) });
            } else if (arg == "-d" || arg == "--n-depth") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_depth.insert(params.n_depth.end(), p.begin(), p.end());
            } else if (arg == "-b" || arg == "--batch-size") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_batch.insert(params.n_batch.end(), p.begin(), p.end());
            } else if (arg == "-ub" || arg == "--ubatch-size") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_ubatch.insert(params.n_ubatch.end(), p.begin(), p.end());
            } else if (arg == "-ctk" || arg == "--cache-type-k") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<std::string>(argv[i], split_delim);

                std::vector<ggml_type> types;
                for (const auto & t : p) {
                    ggml_type gt = ggml_type_from_name(t);
                    if (gt == GGML_TYPE_COUNT) {
                        invalid_param = true;
                        break;
                    }
                    types.push_back(gt);
                }
                if (invalid_param) {
                    break;
                }
                params.type_k.insert(params.type_k.end(), types.begin(), types.end());
            } else if (arg == "-ctv" || arg == "--cache-type-v") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<std::string>(argv[i], split_delim);

                std::vector<ggml_type> types;
                for (const auto & t : p) {
                    ggml_type gt = ggml_type_from_name(t);
                    if (gt == GGML_TYPE_COUNT) {
                        invalid_param = true;
                        break;
                    }
                    types.push_back(gt);
                }
                if (invalid_param) {
                    break;
                }
                params.type_v.insert(params.type_v.end(), types.begin(), types.end());
            } else if (arg == "-dev" || arg == "--device") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto combos = string_split<std::string>(argv[i], split_delim);
                for (const auto & combo : combos) {
                    try {
                        params.devices.push_back(parse_devices_arg(combo));
                    } catch (const std::exception & e) {
                        fprintf(stderr, "error: %s\n", e.what());
                        invalid_param = true;
                        break;
                    }
                }
                if (invalid_param) {
                    break;
                }
            } else if (arg == "--list-devices") {
                std::vector<ggml_backend_dev_t> devices;
                for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                    auto * dev = ggml_backend_dev_get(i);
                    if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
                        devices.push_back(dev);
                    }
                }
                printf("Available devices:\n");
                if (devices.empty()) {
                    printf("  (none)\n");
                }
                for (auto * dev : devices) {
                    size_t free, total;
                    ggml_backend_dev_memory(dev, &free, &total);
                    printf("  %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
                }
                exit(0);
            } else if (arg == "-t" || arg == "--threads") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_threads.insert(params.n_threads.end(), p.begin(), p.end());
            } else if (arg == "-C" || arg == "--cpu-mask") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<std::string>(argv[i], split_delim);
                params.cpu_mask.insert(params.cpu_mask.end(), p.begin(), p.end());
            } else if (arg == "--cpu-strict") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.cpu_strict.insert(params.cpu_strict.end(), p.begin(), p.end());
            } else if (arg == "--poll") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.poll.insert(params.poll.end(), p.begin(), p.end());
            } else if (arg == "-ngl" || arg == "--n-gpu-layers") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
            } else if (arg == "-ncmoe" || arg == "--n-cpu-moe") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = parse_int_range(argv[i]);
                params.n_cpu_moe.insert(params.n_cpu_moe.end(), p.begin(), p.end());
            } else if (llama_supports_rpc() && (arg == "-rpc" || arg == "--rpc")) {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                try {
                    register_rpc_server_list(argv[i]);
                } catch (const std::exception & e) {
                    fprintf(stderr, "error: %s\n", e.what());
                    invalid_param = true;
                    break;
                }
            } else if (arg == "-sm" || arg == "--split-mode") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<std::string>(argv[i], split_delim);

                std::vector<llama_split_mode> modes;
                for (const auto & m : p) {
                    llama_split_mode mode;
                    if (m == "none") {
                        mode = LLAMA_SPLIT_MODE_NONE;
                    } else if (m == "layer") {
                        mode = LLAMA_SPLIT_MODE_LAYER;
                    } else if (m == "row") {
                        mode = LLAMA_SPLIT_MODE_ROW;
                    } else {
                        invalid_param = true;
                        break;
                    }
                    modes.push_back(mode);
                }
                if (invalid_param) {
                    break;
                }
                params.split_mode.insert(params.split_mode.end(), modes.begin(), modes.end());
            } else if (arg == "-mg" || arg == "--main-gpu") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                params.main_gpu = parse_int_range(argv[i]);
            } else if (arg == "-nkvo" || arg == "--no-kv-offload") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
            } else if (arg == "--numa") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                std::string value(argv[i]);
                if (value == "distribute" || value == "") {
                    params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE;
                } else if (value == "isolate") {
                    params.numa = GGML_NUMA_STRATEGY_ISOLATE;
                } else if (value == "numactl") {
                    params.numa = GGML_NUMA_STRATEGY_NUMACTL;
                } else {
                    invalid_param = true;
                    break;
                }
            } else if (arg == "-fa" || arg == "--flash-attn") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.flash_attn.insert(params.flash_attn.end(), p.begin(), p.end());
            } else if (arg == "-mmp" || arg == "--mmap") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.use_mmap.insert(params.use_mmap.end(), p.begin(), p.end());
            } else if (arg == "-embd" || arg == "--embeddings") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.embeddings.insert(params.embeddings.end(), p.begin(), p.end());
            } else if (arg == "-nopo" || arg == "--no-op-offload") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.no_op_offload.insert(params.no_op_offload.end(), p.begin(), p.end());
            } else if (arg == "--no-host") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                auto p = string_split<bool>(argv[i], split_delim);
                params.no_host.insert(params.no_host.end(), p.begin(), p.end());
            } else if (arg == "-ts" || arg == "--tensor-split") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                for (auto ts : string_split<std::string>(argv[i], split_delim)) {
                    // split string by ; and /
                    const std::regex regex{ R"([;/]+)" };
                    std::sregex_token_iterator it{ ts.begin(), ts.end(), regex, -1 };
                    std::vector<std::string> split_arg{ it, {} };
                    GGML_ASSERT(split_arg.size() <= llama_max_devices());

                    std::vector<float> tensor_split(llama_max_devices());
                    for (size_t i = 0; i < llama_max_devices(); ++i) {
                        if (i < split_arg.size()) {
                            tensor_split[i] = std::stof(split_arg[i]);
                        } else {
                            tensor_split[i] = 0.0f;
                        }
                    }
                    params.tensor_split.push_back(tensor_split);
                }
            } else if (arg == "-ot" || arg == "--override-tensor") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                // syntax: <pattern>=<buffer type>[;<pattern>=<buffer type>...],
                // with ',' separating independent override groups to benchmark
                auto * value = argv[i];
                /* static */ std::map<std::string, ggml_backend_buffer_type_t> buft_list;
                if (buft_list.empty()) {
                    // enumerate all the devices and add their buffer types to the list
                    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
                        auto * dev  = ggml_backend_dev_get(i);
                        auto * buft = ggml_backend_dev_buffer_type(dev);
                        if (buft) {
                            buft_list[ggml_backend_buft_name(buft)] = buft;
                        }
                    }
                }
                auto override_group_span_len = std::strcspn(value, ",");
                bool last_group = false;
                do {
                    if (override_group_span_len == 0) {
                        // add an empty override-tensors entry for an empty span
                        params.tensor_buft_overrides.push_back({{}});
                        if (value[override_group_span_len] == '\0') {
                            value = &value[override_group_span_len];
                            last_group = true;
                        } else {
                            value = &value[override_group_span_len + 1];
                            override_group_span_len = std::strcspn(value, ",");
                        }
                        continue;
                    }
                    // stamps null terminators into the argv value for this option
                    // to avoid the memory leak present in the implementation over
                    // in arg.cpp. Acceptable because we only parse these args once
                    // in this program.
                    auto * override_group = value;
                    if (value[override_group_span_len] == '\0') {
                        value = &value[override_group_span_len];
                        last_group = true;
                    } else {
                        value[override_group_span_len] = '\0';
                        value = &value[override_group_span_len + 1];
                    }
                    std::vector<llama_model_tensor_buft_override> group_tensor_buft_overrides{};
                    auto override_span_len = std::strcspn(override_group, ";");
                    while (override_span_len > 0) {
                        auto * override = override_group;
                        if (override_group[override_span_len] != '\0') {
                            override_group[override_span_len] = '\0';
                            override_group = &override_group[override_span_len + 1];
                        } else {
                            override_group = &override_group[override_span_len];
                        }
                        auto tensor_name_span_len = std::strcspn(override, "=");
                        if (tensor_name_span_len >= override_span_len) {
                            invalid_param = true;
                            break;
                        }
                        override[tensor_name_span_len] = '\0';
                        auto * tensor_name = override;
                        auto * buffer_type = &override[tensor_name_span_len + 1];
                        if (buft_list.find(buffer_type) == buft_list.end()) {
                            printf("error: unrecognized buffer type '%s'\n", buffer_type);
                            printf("Available buffer types:\n");
                            for (const auto & it : buft_list) {
                                printf("  %s\n", ggml_backend_buft_name(it.second));
                            }
                            invalid_param = true;
                            break;
                        }
                        group_tensor_buft_overrides.push_back({ tensor_name, buft_list.at(buffer_type) });
                        override_span_len = std::strcspn(override_group, ";");
                    }
                    if (invalid_param) {
                        break;
                    }
                    // terminate the group with the { nullptr, nullptr } sentinel
                    group_tensor_buft_overrides.push_back({ nullptr, nullptr });
                    params.tensor_buft_overrides.push_back(group_tensor_buft_overrides);
                    override_group_span_len = std::strcspn(value, ",");
                } while (!last_group);
            } else if (arg == "-r" || arg == "--repetitions") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                params.reps = std::stoi(argv[i]);
            } else if (arg == "--prio") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                params.prio = (enum ggml_sched_priority) std::stoi(argv[i]);
            } else if (arg == "--delay") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                params.delay = std::stoi(argv[i]);
            } else if (arg == "-o" || arg == "--output") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                invalid_param = !output_format_from_str(argv[i], params.output_format);
            } else if (arg == "-oe" || arg == "--output-err") {
                if (++i >= argc) {
                    invalid_param = true;
                    break;
                }
                invalid_param = !output_format_from_str(argv[i], params.output_format_stderr);
            } else if (arg == "-v" || arg == "--verbose") {
                params.verbose = true;
            } else if (arg == "--progress") {
                params.progress = true;
            } else if (arg == "--no-warmup") {
                params.no_warmup = true;
            } else {
                invalid_param = true;
                break;
            }
        } catch (const std::exception & e) {
            fprintf(stderr, "error: %s\n", e.what());
            invalid_param = true;
            break;
        }
    }

    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        print_usage(argc, argv);
        exit(1);
    }

    // set defaults
    if (params.model.empty()) {
        params.model = cmd_params_defaults.model;
    }
    if (params.n_prompt.empty()) {
        params.n_prompt = cmd_params_defaults.n_prompt;
    }
    if (params.n_gen.empty()) {
        params.n_gen = cmd_params_defaults.n_gen;
    }
    if (params.n_pg.empty()) {
        params.n_pg = cmd_params_defaults.n_pg;
    }
    if (params.n_depth.empty()) {
        params.n_depth = cmd_params_defaults.n_depth;
    }
    if (params.n_batch.empty()) {
        params.n_batch = cmd_params_defaults.n_batch;
    }
    if (params.n_ubatch.empty()) {
        params.n_ubatch = cmd_params_defaults.n_ubatch;
    }
    if (params.type_k.empty()) {
        params.type_k = cmd_params_defaults.type_k;
    }
    if (params.type_v.empty()) {
        params.type_v = cmd_params_defaults.type_v;
    }
    if (params.n_gpu_layers.empty()) {
        params.n_gpu_layers = cmd_params_defaults.n_gpu_layers;
    }
    if (params.n_cpu_moe.empty()) {
        params.n_cpu_moe = cmd_params_defaults.n_cpu_moe;
    }
    if (params.split_mode.empty()) {
        params.split_mode = cmd_params_defaults.split_mode;
    }
    if (params.main_gpu.empty()) {
        params.main_gpu = cmd_params_defaults.main_gpu;
    }
    if (params.no_kv_offload.empty()) {
        params.no_kv_offload = cmd_params_defaults.no_kv_offload;
    }
    if (params.flash_attn.empty()) {
        params.flash_attn = cmd_params_defaults.flash_attn;
    }
    if (params.devices.empty()) {
        params.devices = cmd_params_defaults.devices;
    }
    if (params.tensor_split.empty()) {
        params.tensor_split = cmd_params_defaults.tensor_split;
    }
    if (params.tensor_buft_overrides.empty()) {
        params.tensor_buft_overrides = cmd_params_defaults.tensor_buft_overrides;
    }
    if (params.use_mmap.empty()) {
        params.use_mmap = cmd_params_defaults.use_mmap;
    }
    if (params.embeddings.empty()) {
        params.embeddings = cmd_params_defaults.embeddings;
    }
    if (params.no_op_offload.empty()) {
        params.no_op_offload = cmd_params_defaults.no_op_offload;
    }
    if (params.no_host.empty()) {
        params.no_host = cmd_params_defaults.no_host;
    }
    if (params.n_threads.empty()) {
        params.n_threads = cmd_params_defaults.n_threads;
    }
    if (params.cpu_mask.empty()) {
        params.cpu_mask = cmd_params_defaults.cpu_mask;
    }
    if (params.cpu_strict.empty()) {
        params.cpu_strict = cmd_params_defaults.cpu_strict;
    }
    if (params.poll.empty()) {
        params.poll = cmd_params_defaults.poll;
    }

    return params;
}

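// a single benchmark configuration: one concrete value chosen from each
// cmd_params vector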
struct cmd_params_instance {
    std::string model;
    int n_prompt;
    int n_gen;
    int n_depth;
    int n_batch;
    int n_ubatch;
    ggml_type type_k;
    ggml_type type_v;
    int n_threads;
    std::string cpu_mask;
    bool cpu_strict;
    int poll;
    int n_gpu_layers;
    int n_cpu_moe;
    llama_split_mode split_mode;
    int main_gpu;
    bool no_kv_offload;
    bool flash_attn;
    std::vector<ggml_backend_dev_t> devices;
    std::vector<float> tensor_split;
    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;
    bool use_mmap;
    bool embeddings;
    bool no_op_offload;
    bool no_host;

    llama_model_params to_llama_mparams() const {
        llama_model_params mparams = llama_model_default_params();

        mparams.n_gpu_layers = n_gpu_layers;
        if (!devices.empty()) {
            mparams.devices = const_cast<ggml_backend_dev_t *>(devices.data());
        }
        mparams.split_mode   = split_mode;
        mparams.main_gpu     = main_gpu;
        mparams.tensor_split = tensor_split.data();
        mparams.use_mmap     = use_mmap;
        mparams.no_host      = no_host;

        if (n_cpu_moe <= 0) {
            if (tensor_buft_overrides.empty()) {
                mparams.tensor_buft_overrides = nullptr;
            } else {
                GGML_ASSERT(tensor_buft_overrides.back().pattern == nullptr &&
                            "Tensor buffer overrides not terminated with empty pattern");
                mparams.tensor_buft_overrides = tensor_buft_overrides.data();
            }
        } else {
            // llama_model_params only borrows the override pointers, so the
            // merged list and the pattern strings it points into are kept in
            // static storage to outlive this call
            static std::vector<llama_model_tensor_buft_override> merged;
            static std::vector<std::string> patterns;

            merged.clear();
            patterns.clear();

            // copy the user-provided overrides, minus their null terminator
            auto first = tensor_buft_overrides.begin();
            auto last  = tensor_buft_overrides.end();
            if (first != last && (last - 1)->pattern == nullptr) {
                --last;
            }
            merged.insert(merged.end(), first, last);

            patterns.reserve((size_t) n_cpu_moe);
            merged.reserve(merged.size() + (size_t) n_cpu_moe + 1);

            // pin the FFN expert tensors of the first n_cpu_moe layers to the CPU
            for (int i = 0; i < n_cpu_moe; ++i) {
                patterns.push_back(llm_ffn_exps_block_regex(i));
                merged.push_back({ patterns.back().c_str(),
                                   ggml_backend_cpu_buffer_type() });
            }

            merged.push_back({ nullptr, nullptr });

            mparams.tensor_buft_overrides = merged.data();
        }

        return mparams;
    }

    bool equal_mparams(const cmd_params_instance & other) const {
        return model == other.model && n_gpu_layers == other.n_gpu_layers && n_cpu_moe == other.n_cpu_moe &&
               split_mode == other.split_mode &&
               main_gpu == other.main_gpu && use_mmap == other.use_mmap && tensor_split == other.tensor_split &&
               devices == other.devices &&
               no_host == other.no_host &&
               vec_tensor_buft_override_equal(tensor_buft_overrides, other.tensor_buft_overrides);
    }

    llama_context_params to_llama_cparams() const {
        llama_context_params cparams = llama_context_default_params();

        cparams.n_ctx           = n_prompt + n_gen + n_depth;
        cparams.n_batch         = n_batch;
        cparams.n_ubatch        = n_ubatch;
        cparams.type_k          = type_k;
        cparams.type_v          = type_v;
        cparams.offload_kqv     = !no_kv_offload;
        cparams.flash_attn_type = flash_attn ? LLAMA_FLASH_ATTN_TYPE_ENABLED : LLAMA_FLASH_ATTN_TYPE_DISABLED;
        cparams.embeddings      = embeddings;
        cparams.op_offload      = !no_op_offload;
        cparams.swa_full        = false;

        return cparams;
    }
};

static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_params & params) {
    std::vector<cmd_params_instance> instances;

    // this ordering minimizes the number of times that each model needs to be reloaded
    // clang-format off
    for (const auto & m : params.model)
    for (const auto & nl : params.n_gpu_layers)
    for (const auto & ncmoe : params.n_cpu_moe)
    for (const auto & sm : params.split_mode)
    for (const auto & mg : params.main_gpu)
    for (const auto & devs : params.devices)
    for (const auto & ts : params.tensor_split)
    for (const auto & ot : params.tensor_buft_overrides)
    for (const auto & mmp : params.use_mmap)
    for (const auto & noh : params.no_host)
    for (const auto & embd : params.embeddings)
    for (const auto & nopo : params.no_op_offload)
    for (const auto & nb : params.n_batch)
    for (const auto & nub : params.n_ubatch)
    for (const auto & tk : params.type_k)
    for (const auto & tv : params.type_v)
    for (const auto & nkvo : params.no_kv_offload)
    for (const auto & fa : params.flash_attn)
    for (const auto & nt : params.n_threads)
    for (const auto & cm : params.cpu_mask)
    for (const auto & cs : params.cpu_strict)
    for (const auto & nd : params.n_depth)
    for (const auto & pl : params.poll) {
        for (const auto & n_prompt : params.n_prompt) {
            if (n_prompt == 0) {
                continue;
            }
            cmd_params_instance instance = {
                /* .model        = */ m,
                /* .n_prompt     = */ n_prompt,
                /* .n_gen        = */ 0,
                /* .n_depth      = */ nd,
                /* .n_batch      = */ nb,
                /* .n_ubatch     = */ nub,
                /* .type_k       = */ tk,
                /* .type_v       = */ tv,
                /* .n_threads    = */ nt,
                /* .cpu_mask     = */ cm,
                /* .cpu_strict   = */ cs,
                /* .poll         = */ pl,
                /* .n_gpu_layers = */ nl,
                /* .n_cpu_moe    = */ ncmoe,
                /* .split_mode   = */ sm,
                /* .main_gpu     = */ mg,
                /* .no_kv_offload= */ nkvo,
                /* .flash_attn   = */ fa,
                /* .devices      = */ devs,
                /* .tensor_split = */ ts,
                /* .tensor_buft_overrides = */ ot,
                /* .use_mmap     = */ mmp,
                /* .embeddings   = */ embd,
                /* .no_op_offload= */ nopo,
                /* .no_host      = */ noh,
            };
            instances.push_back(instance);
        }

        for (const auto & n_gen : params.n_gen) {
            if (n_gen == 0) {
                continue;
            }
            cmd_params_instance instance = {
                /* .model        = */ m,
                /* .n_prompt     = */ 0,
                /* .n_gen        = */ n_gen,
                /* .n_depth      = */ nd,
                /* .n_batch      = */ nb,
                /* .n_ubatch     = */ nub,
                /* .type_k       = */ tk,
                /* .type_v       = */ tv,
                /* .n_threads    = */ nt,
                /* .cpu_mask     = */ cm,
                /* .cpu_strict   = */ cs,
                /* .poll         = */ pl,
                /* .n_gpu_layers = */ nl,
                /* .n_cpu_moe    = */ ncmoe,
                /* .split_mode   = */ sm,
                /* .main_gpu     = */ mg,
                /* .no_kv_offload= */ nkvo,
                /* .flash_attn   = */ fa,
                /* .devices      = */ devs,
                /* .tensor_split = */ ts,
                /* .tensor_buft_overrides = */ ot,
                /* .use_mmap     = */ mmp,
                /* .embeddings   = */ embd,
                /* .no_op_offload= */ nopo,
                /* .no_host      = */ noh,
            };
            instances.push_back(instance);
        }

        for (const auto & n_pg : params.n_pg) {
            if (n_pg.first == 0 && n_pg.second == 0) {
                continue;
            }
            cmd_params_instance instance = {
                /* .model        = */ m,
                /* .n_prompt     = */ n_pg.first,
                /* .n_gen        = */ n_pg.second,
                /* .n_depth      = */ nd,
                /* .n_batch      = */ nb,
                /* .n_ubatch     = */ nub,
                /* .type_k       = */ tk,
                /* .type_v       = */ tv,
                /* .n_threads    = */ nt,
                /* .cpu_mask     = */ cm,
                /* .cpu_strict   = */ cs,
                /* .poll         = */ pl,
                /* .n_gpu_layers = */ nl,
                /* .n_cpu_moe    = */ ncmoe,
                /* .split_mode   = */ sm,
                /* .main_gpu     = */ mg,
                /* .no_kv_offload= */ nkvo,
                /* .flash_attn   = */ fa,
                /* .devices      = */ devs,
                /* .tensor_split = */ ts,
                /* .tensor_buft_overrides = */ ot,
                /* .use_mmap     = */ mmp,
                /* .embeddings   = */ embd,
                /* .no_op_offload= */ nopo,
                /* .no_host      = */ noh,
            };
            instances.push_back(instance);
        }
    }
    // clang-format on

    return instances;
}

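// a single benchmark result: the fixed run configuration plus one wall-time
// sample per repetition in samples_ns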
struct test {
    static const std::string build_commit;
    static const int build_number;
    const std::string cpu_info;
    const std::string gpu_info;
    std::string model_filename;
    std::string model_type;
    uint64_t model_size;
    uint64_t model_n_params;
    int n_batch;
    int n_ubatch;
    int n_threads;
    std::string cpu_mask;
    bool cpu_strict;
    int poll;
    ggml_type type_k;
    ggml_type type_v;
    int n_gpu_layers;
    int n_cpu_moe;
    llama_split_mode split_mode;
    int main_gpu;
    bool no_kv_offload;
    bool flash_attn;
    std::vector<ggml_backend_dev_t> devices;
    std::vector<float> tensor_split;
    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;
    bool use_mmap;
    bool embeddings;
    bool no_op_offload;
    bool no_host;
    int n_prompt;
    int n_gen;
    int n_depth;
    std::string test_time;
    std::vector<uint64_t> samples_ns;

    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) :
        cpu_info(get_cpu_info()),
        gpu_info(get_gpu_info()) {

        model_filename = inst.model;
        char buf[128];
        llama_model_desc(lmodel, buf, sizeof(buf));
        model_type     = buf;
        model_size     = llama_model_size(lmodel);
        model_n_params = llama_model_n_params(lmodel);
        n_batch        = inst.n_batch;
        n_ubatch       = inst.n_ubatch;
        n_threads      = inst.n_threads;
        cpu_mask       = inst.cpu_mask;
        cpu_strict     = inst.cpu_strict;
        poll           = inst.poll;
        type_k         = inst.type_k;
        type_v         = inst.type_v;
        n_gpu_layers   = inst.n_gpu_layers;
        n_cpu_moe      = inst.n_cpu_moe;
        split_mode     = inst.split_mode;
        main_gpu       = inst.main_gpu;
        no_kv_offload  = inst.no_kv_offload;
        flash_attn     = inst.flash_attn;
        devices        = inst.devices;
        tensor_split   = inst.tensor_split;
        tensor_buft_overrides = inst.tensor_buft_overrides;
        use_mmap       = inst.use_mmap;
        embeddings     = inst.embeddings;
        no_op_offload  = inst.no_op_offload;
        no_host        = inst.no_host;
        n_prompt       = inst.n_prompt;
        n_gen          = inst.n_gen;
        n_depth        = inst.n_depth;
        // RFC 3339 date-time format
        time_t t = time(NULL);
        std::strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&t));
        test_time = buf;

        (void) ctx;
    }

    uint64_t avg_ns() const { return ::avg(samples_ns); }

    uint64_t stdev_ns() const { return ::stdev(samples_ns); }

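    // throughput per sample: (n_prompt + n_gen) tokens divided by that
    // repetition's wall time, in tokens per second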
    std::vector<double> get_ts() const {
        int n_tokens = n_prompt + n_gen;
        std::vector<double> ts;
        std::transform(samples_ns.begin(), samples_ns.end(), std::back_inserter(ts),
                       [n_tokens](uint64_t t) { return 1e9 * n_tokens / t; });
        return ts;
    }

    double avg_ts() const { return ::avg(get_ts()); }

    double stdev_ts() const { return ::stdev(get_ts()); }

    static std::string get_backend() {
        std::vector<std::string> backends;
        bool rpc_used = false;
        for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
            auto * reg = ggml_backend_reg_get(i);
            std::string name = ggml_backend_reg_name(reg);
            if (string_starts_with(name, "RPC")) {
                if (ggml_backend_reg_dev_count(reg) > 0) {
                    rpc_used = true;
                }
            } else {
                if (name != "CPU") {
                    backends.push_back(ggml_backend_reg_name(reg));
                }
            }
        }
        if (rpc_used) {
            backends.push_back("RPC");
        }
        return backends.empty() ? "CPU" : join(backends, ",");
    }

    static const std::vector<std::string> & get_fields() {
        static const std::vector<std::string> fields = {
            "build_commit", "build_number", "cpu_info", "gpu_info", "backends",
            "model_filename", "model_type", "model_size", "model_n_params", "n_batch",
            "n_ubatch", "n_threads", "cpu_mask", "cpu_strict", "poll",
            "type_k", "type_v", "n_gpu_layers", "n_cpu_moe", "split_mode",
            "main_gpu", "no_kv_offload", "flash_attn", "devices", "tensor_split",
            "tensor_buft_overrides", "use_mmap", "embeddings", "no_op_offload",
            "no_host", "n_prompt", "n_gen", "n_depth", "test_time",
            "avg_ns", "stddev_ns", "avg_ts", "stddev_ts"
        };
        return fields;
    }

    enum field_type { STRING, BOOL, INT, FLOAT };

    static field_type get_field_type(const std::string & field) {
        if (field == "build_number" || field == "n_batch" || field == "n_ubatch" || field == "n_threads" ||
            field == "poll" || field == "model_size" || field == "model_n_params" || field == "n_gpu_layers" ||
            field == "main_gpu" || field == "n_prompt" || field == "n_gen" || field == "n_depth" || field == "avg_ns" ||
            field == "stddev_ns" || field == "no_op_offload" || field == "n_cpu_moe") {
            return INT;
        }
        if (field == "f16_kv" || field == "no_kv_offload" || field == "cpu_strict" || field == "flash_attn" ||
            field == "use_mmap" || field == "embeddings" || field == "no_host") {
            return BOOL;
        }
        if (field == "avg_ts" || field == "stddev_ts") {
            return FLOAT;
        }
        return STRING;
    }

    std::vector<std::string> get_values() const {
        std::string tensor_split_str;
        std::string tensor_buft_overrides_str;
        int max_nonzero = 0;
        for (size_t i = 0; i < llama_max_devices(); i++) {
            if (tensor_split[i] > 0) {
                max_nonzero = i;
            }
        }
        for (int i = 0; i <= max_nonzero; i++) {
            char buf[32];
            snprintf(buf, sizeof(buf), "%.2f", tensor_split[i]);
            tensor_split_str += buf;
            if (i < max_nonzero) {
                tensor_split_str += "/";
            }
        }
        if (tensor_buft_overrides.size() == 1) {
            // the last element of tensor_buft_overrides is always a null pattern,
            // so if it is only one element long, it must be a null pattern.
            GGML_ASSERT(tensor_buft_overrides[0].pattern == nullptr);
            tensor_buft_overrides_str += "none";
        } else {
            for (size_t i = 0; i < tensor_buft_overrides.size() - 1; i++) {
                // last element of tensor_buft_overrides is always a null pattern
                if (tensor_buft_overrides[i].pattern == nullptr) {
                    tensor_buft_overrides_str += "none";
                } else {
                    tensor_buft_overrides_str += tensor_buft_overrides[i].pattern;
                    tensor_buft_overrides_str += "=";
                    tensor_buft_overrides_str += ggml_backend_buft_name(tensor_buft_overrides[i].buft);
                }
                if (i + 2 < tensor_buft_overrides.size()) {
                    tensor_buft_overrides_str += ";";
                }
            }
        }
        std::vector<std::string> values = { build_commit,
                                            std::to_string(build_number),
                                            cpu_info,
                                            gpu_info,
                                            get_backend(),
                                            model_filename,
                                            model_type,
                                            std::to_string(model_size),
                                            std::to_string(model_n_params),
                                            std::to_string(n_batch),
                                            std::to_string(n_ubatch),
                                            std::to_string(n_threads),
                                            cpu_mask,
                                            std::to_string(cpu_strict),
                                            std::to_string(poll),
                                            ggml_type_name(type_k),
                                            ggml_type_name(type_v),
                                            std::to_string(n_gpu_layers),
                                            std::to_string(n_cpu_moe),
                                            split_mode_str(split_mode),
                                            std::to_string(main_gpu),
                                            std::to_string(no_kv_offload),
                                            std::to_string(flash_attn),
                                            devices_to_string(devices),
                                            tensor_split_str,
                                            tensor_buft_overrides_str,
                                            std::to_string(use_mmap),
                                            std::to_string(embeddings),
                                            std::to_string(no_op_offload),
                                            std::to_string(no_host),
                                            std::to_string(n_prompt),
                                            std::to_string(n_gen),
                                            std::to_string(n_depth),
                                            test_time,
                                            std::to_string(avg_ns()),
                                            std::to_string(stdev_ns()),
                                            std::to_string(avg_ts()),
                                            std::to_string(stdev_ts()) };
        return values;
    }

    std::map<std::string, std::string> get_map() const {
        std::map<std::string, std::string> map;
        auto fields = get_fields();
        auto values = get_values();
        std::transform(fields.begin(), fields.end(), values.begin(), std::inserter(map, map.end()),
                       std::make_pair<const std::string &, const std::string &>);
        return map;
    }
};

const std::string test::build_commit = LLAMA_COMMIT;
const int test::build_number = LLAMA_BUILD_NUMBER;

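// output formatting: one printer subclass per output format; the caller sets
// fout before invoking any of the print_* methods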
struct printer {
    virtual ~printer() {}

    FILE * fout;

    virtual void print_header(const cmd_params & params) { (void) params; }

    virtual void print_test(const test & t) = 0;

    virtual void print_footer() {}
};

struct csv_printer : public printer {
    static std::string escape_csv(const std::string & field) {
        std::string escaped = "\"";
        for (auto c : field) {
            if (c == '"') {
                escaped += "\"";
            }
            escaped += c;
        }
        escaped += "\"";
        return escaped;
    }

    void print_header(const cmd_params & params) override {
        std::vector<std::string> fields = test::get_fields();
        fprintf(fout, "%s\n", join(fields, ",").c_str());
        (void) params;
    }

    void print_test(const test & t) override {
        std::vector<std::string> values = t.get_values();
        std::transform(values.begin(), values.end(), values.begin(), escape_csv);
        fprintf(fout, "%s\n", join(values, ",").c_str());
    }
};

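// escape a string for embedding in JSON output: quotes, backslashes, and C0
// control characters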
static std::string escape_json(const std::string & value) {
    std::string escaped;
    for (auto c : value) {
        if (c == '"') {
            escaped += "\\\"";
        } else if (c == '\\') {
            escaped += "\\\\";
        } else if (c <= 0x1f) {
            char buf[8];
            snprintf(buf, sizeof(buf), "\\u%04x", c);
            escaped += buf;
        } else {
            escaped += c;
        }
    }
    return escaped;
}

static std::string format_json_value(const std::string & field, const std::string & value) {
    switch (test::get_field_type(field)) {
        case test::STRING:
            return "\"" + escape_json(value) + "\"";
        case test::BOOL:
            return value == "0" ? "false" : "true";
        default:
            return value;
    }
}

struct json_printer : public printer {
    bool first = true;

    void print_header(const cmd_params & params) override {
        fprintf(fout, "[\n");
        (void) params;
    }

    void print_fields(const std::vector<std::string> & fields, const std::vector<std::string> & values) {
        assert(fields.size() == values.size());
        for (size_t i = 0; i < fields.size(); i++) {
            fprintf(fout, "    \"%s\": %s,\n", fields.at(i).c_str(),
                    format_json_value(fields.at(i), values.at(i)).c_str());
        }
    }

    void print_test(const test & t) override {
        if (first) {
            first = false;
        } else {
            fprintf(fout, ",\n");
        }
        fprintf(fout, "  {\n");
        print_fields(test::get_fields(), t.get_values());
        fprintf(fout, "    \"samples_ns\": [ %s ],\n", join(t.samples_ns, ", ").c_str());
        fprintf(fout, "    \"samples_ts\": [ %s ]\n", join(t.get_ts(), ", ").c_str());
        fprintf(fout, "  }");
        fflush(fout);
    }

    void print_footer() override { fprintf(fout, "\n]\n"); }
};

struct jsonl_printer : public printer {
    void print_fields(const std::vector<std::string> & fields, const std::vector<std::string> & values) {
        assert(fields.size() == values.size());
        for (size_t i = 0; i < fields.size(); i++) {
            fprintf(fout, "\"%s\": %s, ", fields.at(i).c_str(), format_json_value(fields.at(i), values.at(i)).c_str());
        }
    }

    void print_test(const test & t) override {
        fprintf(fout, "{");
        print_fields(test::get_fields(), t.get_values());
        fprintf(fout, "\"samples_ns\": [ %s ],", join(t.samples_ns, ", ").c_str());
        fprintf(fout, "\"samples_ts\": [ %s ]", join(t.get_ts(), ", ").c_str());
        fprintf(fout, "}\n");
        fflush(fout);
    }
};

struct markdown_printer : public printer {
    std::vector<std::string> fields;

    static int get_field_width(const std::string & field) {
        if (field == "model") {
            return -30;
        }
        if (field == "t/s") {
            return 20;
        }
        if (field == "size" || field == "params") {
            return 10;
        }
        if (field == "n_gpu_layers") {
            return 3;
        }
        if (field == "n_threads") {
            return 7;
        }
        if (field == "n_batch") {
            return 7;
        }
        if (field == "n_ubatch") {
            return 8;
        }
        if (field == "type_k" || field == "type_v") {
            return 6;
        }
        if (field == "split_mode") {
            return 5;
        }
        if (field == "flash_attn") {
            return 2;
        }
        if (field == "devices") {
            return -12;
        }
        if (field == "use_mmap") {
            return 4;
        }
        if (field == "test") {
            return 15;
        }
        if (field == "no_op_offload") {
            return 4;
        }
        if (field == "no_host") {
            return 4;
        }

        int width = std::max((int) field.length(), 10);

        if (test::get_field_type(field) == test::STRING) {
            return -width;
        }
        return width;
    }

    static std::string get_field_display_name(const std::string & field) {
        if (field == "n_gpu_layers") {
            return "ngl";
        }
        if (field == "split_mode") {
            return "sm";
        }
        if (field == "n_threads") {
            return "threads";
        }
        if (field == "no_kv_offload") {
            return "nkvo";
        }
        if (field == "flash_attn") {
            return "fa";
        }
        if (field == "use_mmap") {
            return "mmap";
        }
        if (field == "embeddings") {
            return "embd";
        }
        if (field == "no_op_offload") {
            return "nopo";
        }
        if (field == "no_host") {
            return "noh";
        }
        if (field == "devices") {
            return "dev";
        }
        if (field == "tensor_split") {
            return "ts";
        }
        if (field == "tensor_buft_overrides") {
            return "ot";
        }
        return field;
    }

1733 void print_header(const cmd_params & params) override {
1734 // select fields to print
1735 fields.emplace_back(args: "model");
1736 fields.emplace_back(args: "size");
1737 fields.emplace_back(args: "params");
1738 fields.emplace_back(args: "backend");
1739 bool is_cpu_backend = test::get_backend().find(s: "CPU") != std::string::npos ||
1740 test::get_backend().find(s: "BLAS") != std::string::npos;
1741 if (!is_cpu_backend) {
1742 fields.emplace_back(args: "n_gpu_layers");
1743 }
1744 if (params.n_cpu_moe.size() > 1) {
1745 fields.emplace_back(args: "n_cpu_moe");
1746 }
1747 if (params.n_threads.size() > 1 || params.n_threads != cmd_params_defaults.n_threads || is_cpu_backend) {
1748 fields.emplace_back(args: "n_threads");
1749 }
1750 if (params.cpu_mask.size() > 1 || params.cpu_mask != cmd_params_defaults.cpu_mask) {
1751 fields.emplace_back(args: "cpu_mask");
1752 }
1753 if (params.cpu_strict.size() > 1 || params.cpu_strict != cmd_params_defaults.cpu_strict) {
1754 fields.emplace_back(args: "cpu_strict");
1755 }
1756 if (params.poll.size() > 1 || params.poll != cmd_params_defaults.poll) {
1757 fields.emplace_back(args: "poll");
1758 }
1759 if (params.n_batch.size() > 1 || params.n_batch != cmd_params_defaults.n_batch) {
1760 fields.emplace_back(args: "n_batch");
1761 }
1762 if (params.n_ubatch.size() > 1 || params.n_ubatch != cmd_params_defaults.n_ubatch) {
1763 fields.emplace_back(args: "n_ubatch");
1764 }
1765 if (params.type_k.size() > 1 || params.type_k != cmd_params_defaults.type_k) {
1766 fields.emplace_back(args: "type_k");
1767 }
1768 if (params.type_v.size() > 1 || params.type_v != cmd_params_defaults.type_v) {
1769 fields.emplace_back(args: "type_v");
1770 }
1771 if (params.main_gpu.size() > 1 || params.main_gpu != cmd_params_defaults.main_gpu) {
1772 fields.emplace_back(args: "main_gpu");
1773 }
1774 if (params.split_mode.size() > 1 || params.split_mode != cmd_params_defaults.split_mode) {
1775 fields.emplace_back(args: "split_mode");
1776 }
1777 if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) {
1778 fields.emplace_back(args: "no_kv_offload");
1779 }
1780 if (params.flash_attn.size() > 1 || params.flash_attn != cmd_params_defaults.flash_attn) {
1781 fields.emplace_back(args: "flash_attn");
1782 }
1783 if (params.devices.size() > 1 || params.devices != cmd_params_defaults.devices) {
1784 fields.emplace_back(args: "devices");
1785 }
1786 if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) {
1787 fields.emplace_back(args: "tensor_split");
1788 }
1789 if (params.tensor_buft_overrides.size() > 1 || !vec_vec_tensor_buft_override_equal(a: params.tensor_buft_overrides, b: cmd_params_defaults.tensor_buft_overrides)) {
1790 fields.emplace_back(args: "tensor_buft_overrides");
1791 }
1792 if (params.use_mmap.size() > 1 || params.use_mmap != cmd_params_defaults.use_mmap) {
1793 fields.emplace_back(args: "use_mmap");
1794 }
1795 if (params.embeddings.size() > 1 || params.embeddings != cmd_params_defaults.embeddings) {
1796 fields.emplace_back(args: "embeddings");
1797 }
1798 if (params.no_op_offload.size() > 1 || params.no_op_offload != cmd_params_defaults.no_op_offload) {
1799 fields.emplace_back(args: "no_op_offload");
1800 }
1801 if (params.no_host.size() > 1 || params.no_host != cmd_params_defaults.no_host) {
1802 fields.emplace_back(args: "no_host");
1803 }
1804 fields.emplace_back(args: "test");
1805 fields.emplace_back(args: "t/s");
1806
1807 fprintf(stream: fout, format: "|");
1808 for (const auto & field : fields) {
1809 fprintf(stream: fout, format: " %*s |", get_field_width(field), get_field_display_name(field).c_str());
1810 }
1811 fprintf(stream: fout, format: "\n");
1812 fprintf(stream: fout, format: "|");
1813 for (const auto & field : fields) {
1814 int width = get_field_width(field);
1815 fprintf(stream: fout, format: " %s%s |", std::string(std::abs(x: width) - 1, '-').c_str(), width > 0 ? ":" : "-");
1816 }
1817 fprintf(stream: fout, format: "\n");
1818 }
1819
1820 void print_test(const test & t) override {
1821 std::map<std::string, std::string> vmap = t.get_map();
1822
1823 fprintf(stream: fout, format: "|");
1824 for (const auto & field : fields) {
1825 std::string value;
1826 char buf[128];
1827 if (field == "model") {
1828 value = t.model_type;
1829 } else if (field == "size") {
1830 if (t.model_size < 1024 * 1024 * 1024) {
1831 snprintf(s: buf, maxlen: sizeof(buf), format: "%.2f MiB", t.model_size / 1024.0 / 1024.0);
1832 } else {
1833 snprintf(s: buf, maxlen: sizeof(buf), format: "%.2f GiB", t.model_size / 1024.0 / 1024.0 / 1024.0);
1834 }
1835 value = buf;
1836 } else if (field == "params") {
1837 if (t.model_n_params < 1000 * 1000 * 1000) {
1838 snprintf(s: buf, maxlen: sizeof(buf), format: "%.2f M", t.model_n_params / 1e6);
1839 } else {
1840 snprintf(s: buf, maxlen: sizeof(buf), format: "%.2f B", t.model_n_params / 1e9);
1841 }
1842 value = buf;
1843 } else if (field == "backend") {
1844 value = test::get_backend();
1845 } else if (field == "test") {
1846 if (t.n_prompt > 0 && t.n_gen == 0) {
1847 snprintf(s: buf, maxlen: sizeof(buf), format: "pp%d", t.n_prompt);
1848 } else if (t.n_gen > 0 && t.n_prompt == 0) {
1849 snprintf(s: buf, maxlen: sizeof(buf), format: "tg%d", t.n_gen);
1850 } else {
1851 snprintf(s: buf, maxlen: sizeof(buf), format: "pp%d+tg%d", t.n_prompt, t.n_gen);
1852 }
1853 if (t.n_depth > 0) {
1854 int len = strlen(s: buf);
1855 snprintf(s: buf + len, maxlen: sizeof(buf) - len, format: " @ d%d", t.n_depth);
1856 }
1857 value = buf;
1858 } else if (field == "t/s") {
1859 snprintf(s: buf, maxlen: sizeof(buf), format: "%.2f ± %.2f", t.avg_ts(), t.stdev_ts());
1860 value = buf;
1861 } else if (vmap.find(x: field) != vmap.end()) {
1862 value = vmap.at(k: field);
1863 } else {
1864 assert(false);
1865 exit(status: 1);
1866 }
1867
1868 int width = get_field_width(field);
1869 if (field == "t/s") {
1870 // HACK: the utf-8 character is 2 bytes
1871 width += 1;
1872 }
1873 fprintf(stream: fout, format: " %*s |", width, value.c_str());
1874 }
1875 fprintf(stream: fout, format: "\n");
1876 }
1877
1878 void print_footer() override {
1879 fprintf(stream: fout, format: "\nbuild: %s (%d)\n", test::build_commit.c_str(), test::build_number);
1880 }
1881};
1882
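// emit a CREATE TABLE statement followed by one INSERT per test,
// suitable for piping straight into sqlite3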
struct sql_printer : public printer {
    static std::string get_sql_field_type(const std::string & field) {
        switch (test::get_field_type(field)) {
            case test::STRING:
                return "TEXT";
            case test::BOOL:
            case test::INT:
                return "INTEGER";
            case test::FLOAT:
                return "REAL";
            default:
                assert(false);
                exit(1);
        }
    }

    void print_header(const cmd_params & params) override {
        std::vector<std::string> fields = test::get_fields();
        fprintf(fout, "CREATE TABLE IF NOT EXISTS llama_bench (\n");
        for (size_t i = 0; i < fields.size(); i++) {
            fprintf(fout, "  %s %s%s\n", fields.at(i).c_str(), get_sql_field_type(fields.at(i)).c_str(),
                    i < fields.size() - 1 ? "," : "");
        }
        fprintf(fout, ");\n");
        fprintf(fout, "\n");
        (void) params;
    }

    void print_test(const test & t) override {
        fprintf(fout, "INSERT INTO llama_bench (%s) ", join(test::get_fields(), ", ").c_str());
        fprintf(fout, "VALUES (");
        std::vector<std::string> values = t.get_values();
        for (size_t i = 0; i < values.size(); i++) {
            fprintf(fout, "'%s'%s", values.at(i).c_str(), i < values.size() - 1 ? ", " : "");
        }
        fprintf(fout, ");\n");
    }
};

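// snapshot of the llama_context state at a given depth, reused to avoid
// re-processing the same prompt prefix on every repetition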
struct ctx_state {
    int depth = 0; // in tokens

    std::vector<uint8_t> buf; // the llama_context state buffer
};

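// benchmark prompt processing: decode n_prompt random tokens in chunks of up to n_batch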
static bool test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) {
    llama_set_n_threads(ctx, n_threads, n_threads);

    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);
    const int32_t n_vocab = llama_vocab_n_tokens(vocab);

    std::vector<llama_token> tokens(n_batch);

    int n_processed = 0;

    while (n_processed < n_prompt) {
        int n_tokens = std::min(n_prompt - n_processed, n_batch);
        tokens[0] = n_processed == 0 && llama_vocab_get_add_bos(vocab) ? llama_vocab_bos(vocab) : std::rand() % n_vocab;
        for (int i = 1; i < n_tokens; i++) {
            tokens[i] = std::rand() % n_vocab;
        }
        int res = llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens));
        if (res != 0) {
            fprintf(stderr, "%s: failed to decode prompt batch, res = %d\n", __func__, res);
            return false;
        }
        n_processed += n_tokens;
    }

    llama_synchronize(ctx);
    return true;
}

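// benchmark token generation: decode n_gen random tokens one at a time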
static bool test_gen(llama_context * ctx, int n_gen, int n_threads) {
    llama_set_n_threads(ctx, n_threads, n_threads);

    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);
    const int32_t n_vocab = llama_vocab_n_tokens(vocab);

    llama_token token = llama_vocab_get_add_bos(vocab) ? llama_vocab_bos(vocab) : std::rand() % n_vocab;

    for (int i = 0; i < n_gen; i++) {
        int res = llama_decode(ctx, llama_batch_get_one(&token, 1));
        if (res != 0) {
            fprintf(stderr, "%s: failed to decode generation batch, res = %d\n", __func__, res);
            return false;
        }
        llama_synchronize(ctx);
        token = std::rand() % n_vocab;
    }
    return true;
}

static void llama_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) text;
    (void) user_data;
}

static std::unique_ptr<printer> create_printer(output_formats format) {
    switch (format) {
        case NONE:
            return nullptr;
        case CSV:
            return std::unique_ptr<printer>(new csv_printer());
        case JSON:
            return std::unique_ptr<printer>(new json_printer());
        case JSONL:
            return std::unique_ptr<printer>(new jsonl_printer());
        case MARKDOWN:
            return std::unique_ptr<printer>(new markdown_printer());
        case SQL:
            return std::unique_ptr<printer>(new sql_printer());
    }
    GGML_ABORT("fatal error");
}

int main(int argc, char ** argv) {
    // try to set locale for unicode characters in markdown
    setlocale(LC_CTYPE, ".UTF-8");

#if !defined(NDEBUG)
    fprintf(stderr, "warning: asserts enabled, performance may be affected\n");
#endif

#if (defined(_MSC_VER) && defined(_DEBUG)) || (!defined(_MSC_VER) && !defined(__OPTIMIZE__))
    fprintf(stderr, "warning: debug build, performance may be affected\n");
#endif

#if defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__)
    fprintf(stderr, "warning: sanitizer enabled, performance may be affected\n");
#endif

    // initialize backends
    ggml_backend_load_all();

    cmd_params params = parse_cmd_params(argc, argv);

    auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
    if (!cpu_dev) {
        fprintf(stderr, "%s: error: CPU backend is not loaded\n", __func__);
        return 1;
    }
    auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
    auto * ggml_threadpool_new_fn  = (decltype(ggml_threadpool_new) *)  ggml_backend_reg_get_proc_address(cpu_reg, "ggml_threadpool_new");
    auto * ggml_threadpool_free_fn = (decltype(ggml_threadpool_free) *) ggml_backend_reg_get_proc_address(cpu_reg, "ggml_threadpool_free");

    // initialize llama.cpp
    if (!params.verbose) {
        llama_log_set(llama_null_log_callback, NULL);
    }
    llama_backend_init();
    llama_numa_init(params.numa);

    set_process_priority(params.prio);

    // initialize printer
    std::unique_ptr<printer> p     = create_printer(params.output_format);
    std::unique_ptr<printer> p_err = create_printer(params.output_format_stderr);

    if (p) {
        p->fout = stdout;
        p->print_header(params);
    }

    if (p_err) {
        p_err->fout = stderr;
        p_err->print_header(params);
    }

    std::vector<cmd_params_instance> params_instances = get_cmd_params_instances(params);

    llama_model * lmodel = nullptr;
    const cmd_params_instance * prev_inst = nullptr;

    // store the llama_context state at the previous depth that we performed a test
    // ref: https://github.com/ggml-org/llama.cpp/pull/16944#issuecomment-3478151721
    ctx_state cstate;

    int params_idx = 0;
    auto params_count = params_instances.size();
    for (const auto & inst : params_instances) {
        params_idx++;
        if (params.progress) {
            fprintf(stderr, "llama-bench: benchmark %d/%zu: starting\n", params_idx, params_count);
        }
        // keep the same model between tests when possible
        if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) {
            if (lmodel) {
                llama_model_free(lmodel);
            }

            lmodel = llama_model_load_from_file(inst.model.c_str(), inst.to_llama_mparams());
            if (lmodel == NULL) {
                fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, inst.model.c_str());
                return 1;
            }
            prev_inst = &inst;
        }

        llama_context * ctx = llama_init_from_model(lmodel, inst.to_llama_cparams());
        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, inst.model.c_str());
            llama_model_free(lmodel);
            return 1;
        }

        test t(inst, lmodel, ctx);

        llama_memory_clear(llama_get_memory(ctx), false);

        // cool off before the test
        if (params.delay) {
            std::this_thread::sleep_for(std::chrono::seconds(params.delay));
        }

        struct ggml_threadpool_params tpp = ggml_threadpool_params_default(t.n_threads);
        if (!parse_cpu_mask(t.cpu_mask, tpp.cpumask)) {
            fprintf(stderr, "%s: failed to parse cpu-mask: %s\n", __func__, t.cpu_mask.c_str());
            exit(1);
        }
        tpp.strict_cpu = t.cpu_strict;
        tpp.poll       = t.poll;
        tpp.prio       = params.prio;

        struct ggml_threadpool * threadpool = ggml_threadpool_new_fn(&tpp);
        if (!threadpool) {
            fprintf(stderr, "%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads);
            exit(1);
        }

        llama_attach_threadpool(ctx, threadpool, NULL);

        // warmup run
        if (!params.no_warmup) {
            if (t.n_prompt > 0) {
                if (params.progress) {
                    fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup prompt run\n", params_idx, params_count);
                }
                //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
                bool res = test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
                if (!res) {
                    fprintf(stderr, "%s: error: failed to run prompt warmup\n", __func__);
                    exit(1);
                }
            }
            if (t.n_gen > 0) {
                if (params.progress) {
                    fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup generation run\n", params_idx, params_count);
                }
                bool res = test_gen(ctx, 1, t.n_threads);
                if (!res) {
                    fprintf(stderr, "%s: error: failed to run gen warmup\n", __func__);
                    exit(1);
                }
            }
        }

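        // measured repetitions; the depth prefix is restored (or recomputed) before
        // the timer starts, so only the prompt/generation work is counted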
        for (int i = 0; i < params.reps; i++) {
            llama_memory_clear(llama_get_memory(ctx), false);

            if (t.n_depth > 0) {
                bool is_cached = t.n_depth == cstate.depth;

                if (is_cached) {
                    // if previously we have computed at this depth, just restore the state
                    const size_t ret = llama_state_seq_set_data(ctx, cstate.buf.data(), cstate.buf.size(), 0);
                    if (ret == 0) {
                        // if the old state is incompatible with the current context - reprocess from scratch
                        is_cached = false;
                    }
                }

                if (!is_cached) {
                    if (params.progress) {
                        fprintf(stderr, "llama-bench: benchmark %d/%zu: depth run %d/%d\n", params_idx, params_count,
                                i + 1, params.reps);
                    }
                    bool res = test_prompt(ctx, t.n_depth, t.n_batch, t.n_threads);
                    if (!res) {
                        fprintf(stderr, "%s: error: failed to run depth\n", __func__);
                        exit(1);
                    }

                    // store the context state for reuse in later runs
                    cstate.depth = t.n_depth;
                    cstate.buf.resize(llama_state_seq_get_size(ctx, 0));
                    llama_state_seq_get_data(ctx, cstate.buf.data(), cstate.buf.size(), 0);
                } else {
                    if (params.progress) {
                        fprintf(stderr, "llama-bench: benchmark %d/%zu: depth run %d/%d (cached)\n", params_idx, params_count,
                                i + 1, params.reps);
                    }
                }
            }

            uint64_t t_start = get_time_ns();

            if (t.n_prompt > 0) {
                if (params.progress) {
                    fprintf(stderr, "llama-bench: benchmark %d/%zu: prompt run %d/%d\n", params_idx, params_count,
                            i + 1, params.reps);
                }
                bool res = test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
                if (!res) {
                    fprintf(stderr, "%s: error: failed to run prompt\n", __func__);
                    exit(1);
                }
            }
            if (t.n_gen > 0) {
                if (params.progress) {
                    fprintf(stderr, "llama-bench: benchmark %d/%zu: generation run %d/%d\n", params_idx, params_count,
                            i + 1, params.reps);
                }
                bool res = test_gen(ctx, t.n_gen, t.n_threads);
                if (!res) {
                    fprintf(stderr, "%s: error: failed to run gen\n", __func__);
                    exit(1);
                }
            }

            uint64_t t_ns = get_time_ns() - t_start;
            t.samples_ns.push_back(t_ns);
        }

        if (p) {
            p->print_test(t);
            fflush(p->fout);
        }

        if (p_err) {
            p_err->print_test(t);
            fflush(p_err->fout);
        }

        llama_perf_context_print(ctx);

        llama_free(ctx);

        ggml_threadpool_free_fn(threadpool);
    }

    llama_model_free(lmodel);

    if (p) {
        p->print_footer();
    }

    if (p_err) {
        p_err->print_footer();
    }

    llama_backend_free();

    return 0;
}