#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

static void print_usage(int, char ** argv) {
    LOG("\nexample usage:\n");
    LOG("\n    %s -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]\n", argv[0]);
    LOG("\n");
}

int main(int argc, char ** argv) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
        return 1;
    }

    common_init();

    int is_pp_shared = params.is_pp_shared;

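    // lists of prompt sizes, generation lengths and parallel-sequence counts to sweep
    // over, taken from the -npp, -ntg and -npl arguments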
    std::vector<int> n_pp = params.n_pp;
    std::vector<int> n_tg = params.n_tg;
    std::vector<int> n_pl = params.n_pl;

    // init LLM

    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model

    llama_model_params model_params = common_model_params_to_llama(params);

    llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    llama_context_params ctx_params = common_context_params_to_llama(params);

    // ensure enough sequences are available
    ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());

    llama_context * ctx = llama_init_from_model(model, ctx_params);

    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
        return 1;
    }

    const llama_vocab * vocab = llama_model_get_vocab(model);
    const int32_t n_vocab = llama_vocab_n_tokens(vocab);

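    // random token ids are good enough here - the benchmark measures decoding
    // throughput, not output quality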
    const auto get_token_rand = [n_vocab]() -> llama_token {
        return std::rand() % n_vocab;
    };

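    // handle to the context's memory (KV cache), used below to clear it and to copy sequences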
    auto * mem = llama_get_memory(ctx);

    const int32_t n_kv_max = llama_n_ctx(ctx);

    llama_batch batch = llama_batch_init(n_kv_max, 0, 1);

    // decode in batches of ctx_params.n_batch tokens
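    // when `synchronize` is false, the caller is responsible for calling llama_synchronize()
    // before reading any timings that depend on the decoded batch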
    auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch, bool synchronize) {
        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
            const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));

            llama_batch batch_view = {
                n_tokens,
                batch.token    + i,
                nullptr, // embd
                batch.pos      + i,
                batch.n_seq_id + i,
                batch.seq_id   + i,
                batch.logits   + i,
            };

            const int ret = llama_decode(ctx, batch_view);
            if (ret != 0) {
                LOG_ERR("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret);
                return false;
            }

            if (synchronize) {
                llama_synchronize(ctx);
            }
        }

        return true;
    };

    // warm up
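    // a short decode so that one-time backend initialization does not skew the first measured run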
    {
        for (int i = 0; i < 16; ++i) {
            common_batch_add(batch, get_token_rand(), i, { 0 }, false);
        }

        if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
            LOG_ERR("%s: llama_decode() failed\n", __func__);
            return 1;
        }
    }

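    // table legend:
    //   PP   - prompt tokens per sequence        TG   - generated tokens per sequence
    //   B    - number of parallel sequences      N_KV - required KV cache size (tokens)
    //   T_PP - prompt processing time (s)        S_PP - prompt processing speed (t/s)
    //   T_TG - text generation time (s)          S_TG - text generation speed (t/s)
    //   T    - total time (s)                    S    - total speed (t/s)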
    if (!params.batched_bench_output_jsonl) {
        LOG("\n");
        LOG("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
        LOG("\n");
        LOG("|%6s | %6s | %4s | %6s | %8s | %8s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "B", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s", "T s", "S t/s");
        LOG("|%6s-|-%6s-|-%4s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "----", "------", "--------", "--------", "--------", "--------", "--------", "--------");
    }

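    // sweep every combination of prompt size (-npp), generation length (-ntg) and
    // number of parallel sequences (-npl)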
    for (int i_pp = 0; i_pp < (int) n_pp.size(); ++i_pp) {
        for (int i_tg = 0; i_tg < (int) n_tg.size(); ++i_tg) {
            for (int i_pl = 0; i_pl < (int) n_pl.size(); ++i_pl) {
                const int pp = n_pp[i_pp];
                const int tg = n_tg[i_tg];
                const int pl = n_pl[i_pl];

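                // required KV cache size: with a shared prompt the pp tokens are stored once
                // (or once per sequence if the KV cache is not unified), plus pl*tg generated
                // tokens; otherwise each of the pl sequences stores its own pp + tg tokens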
                const int n_ctx_req = is_pp_shared ? (params.kv_unified ? pp : pl*pp) + pl*tg : pl*(pp + tg);

                if (n_ctx_req > n_kv_max) {
                    continue;
                }

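                // fill the batch with the prompt(s); logits are only requested for the last
                // prompt token of each sequence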
                common_batch_clear(batch);

                for (int j = 0; j < (is_pp_shared ? 1 : pl); ++j) {
                    for (int i = 0; i < pp; ++i) {
                        common_batch_add(batch, get_token_rand(), i, { j }, i == pp - 1);
                    }
                }

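                // start each measurement from an empty KV cache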
                llama_memory_clear(mem, false);

                const auto t_pp_start = ggml_time_us();

                if (!decode_helper(ctx, batch, ctx_params.n_batch, false)) {
                    LOG_ERR("%s: llama_decode() failed\n", __func__);
                    return 1;
                }

                llama_synchronize(ctx);

                const auto t_pp_end = ggml_time_us();

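                // with a shared prompt, replicate the KV cache of sequence 0 into the other
                // pl - 1 sequences so that all of them can generate from the same prefix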
                if (is_pp_shared) {
                    for (int32_t i = 1; i < pl; ++i) {
                        llama_memory_seq_cp(mem, 0, i, -1, -1);
                    }

                    if (!params.kv_unified) {
                        // run one dummy token to apply the memory copy
                        common_batch_clear(batch);
                        common_batch_add(batch, get_token_rand(), pp + 0, { 0 }, true);
                        if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
                            LOG_ERR("%s: llama_decode() failed\n", __func__);
                            return 1;
                        }
                        llama_memory_seq_rm(mem, 0, pp, -1);
                    }
                }

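                // generation phase: each step decodes one new token for every sequence,
                // so a single llama_decode() call processes pl tokens in parallel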
                const auto t_tg_start = ggml_time_us();

                for (int i = 0; i < tg; ++i) {
                    common_batch_clear(batch);

                    for (int j = 0; j < pl; ++j) {
                        common_batch_add(batch, get_token_rand(), pp + i, { j }, true);
                    }

                    if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
                        LOG_ERR("%s: llama_decode() failed\n", __func__);
                        return 1;
                    }
                }

                const auto t_tg_end = ggml_time_us();

                const int32_t n_kv = n_ctx_req;

                const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f;
                const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f;
                const float t    = t_pp + t_tg;

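                // speeds in tokens/s; with a shared prompt only pp prompt tokens were
                // actually decoded, otherwise pl*pp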
                const float speed_pp = is_pp_shared ? pp / t_pp : pl*pp / t_pp;
                const float speed_tg = pl*tg / t_tg;
                const float speed    = ((is_pp_shared ? pp : pl*pp) + pl*tg) / t;

                if (params.batched_bench_output_jsonl) {
                    LOG(
                        "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"is_pp_shared\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, "
                        "\"pp\": %d, \"tg\": %d, \"pl\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f, \"t\": %f, \"speed\": %f}\n",
                        n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
                        pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed
                    );
                } else {
                    LOG("|%6d | %6d | %4d | %6d | %8.3f | %8.2f | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed);
                }
            }
        }
    }

    LOG("\n");
    llama_perf_context_print(ctx);

    llama_batch_free(batch);

    llama_free(ctx);
    llama_model_free(model);

    llama_backend_free();

    return 0;
}