1#include "ggml-alloc.h"
2#include "ggml-backend-impl.h"
3#include "ggml.h"
4#include "ggml-impl.h"
5#include <assert.h>
6#include <limits.h>
7#include <stdarg.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11
12#define MAX(a, b) ((a) > (b) ? (a) : (b))
13#define MAX_FREE_BLOCKS 256
14
15//#define GGML_ALLOCATOR_DEBUG
16
17//#define AT_PRINTF(...) GGML_LOG_DEBUG(__VA_ARGS__)
18#define AT_PRINTF(...)
19
20
21static bool ggml_is_view(const struct ggml_tensor * t) {
22 return t->view_src != NULL;
23}
24
25// ops that return true for this function must not use restrict pointers for their backend implementations
26bool ggml_op_can_inplace(enum ggml_op op) {
27 switch (op) {
28 case GGML_OP_SCALE:
29 case GGML_OP_DIAG_MASK_ZERO:
30 case GGML_OP_DIAG_MASK_INF:
31 case GGML_OP_ADD:
32 case GGML_OP_ADD_ID:
33 case GGML_OP_ADD1:
34 case GGML_OP_SUB:
35 case GGML_OP_MUL:
36 case GGML_OP_DIV:
37 case GGML_OP_SQR:
38 case GGML_OP_SQRT:
39 case GGML_OP_LOG:
40 case GGML_OP_UNARY:
41 case GGML_OP_ROPE:
42 case GGML_OP_ROPE_BACK:
43 case GGML_OP_SILU_BACK:
44 case GGML_OP_RMS_NORM:
45 case GGML_OP_RMS_NORM_BACK:
46 case GGML_OP_SOFT_MAX:
47 case GGML_OP_SOFT_MAX_BACK:
48 return true;
49
50 default:
51 return false;
52 }
53}
54
55static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
56 assert(alignment && !(alignment & (alignment - 1))); // power of 2
57 size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
58 return offset + align;
59}
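
// e.g. with a 16-byte aligned base pointer, aligned_offset(base, 13, 16) rounds the offset up to 16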

// tallocr

struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) {
    void * base = ggml_backend_buffer_get_base(buffer);
    size_t align = ggml_backend_buffer_get_alignment(buffer);

    assert(align && !(align & (align - 1))); // power of 2

    struct ggml_tallocr talloc = (struct ggml_tallocr) {
        /*.buffer    = */ buffer,
        /*.base      = */ base,
        /*.alignment = */ align,
        /*.offset    = */ aligned_offset(base, 0, align),
    };
    return talloc;
}

enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) {
    size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor);
    size = GGML_PAD(size, talloc->alignment);

    if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) {
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n",
                __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset);
        GGML_ABORT("not enough space in the buffer");
    }

    void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset;
    talloc->offset += size;

    assert(((uintptr_t)addr % talloc->alignment) == 0);

    return ggml_backend_tensor_alloc(talloc->buffer, tensor, addr);
}
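
// Rough usage sketch for the fixed-buffer allocator above (assumes `buffer` and `tensor` were
// created elsewhere and that the tensor fits):
//   struct ggml_tallocr talloc = ggml_tallocr_new(buffer);
//   ggml_tallocr_alloc(&talloc, tensor); // bumps the internal offset, aborts if the buffer is full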

// dynamic tensor allocator

#define GGML_VBUFFER_MAX_CHUNKS 16

// relative memory address within an allocation that can be split into multiple buffers (chunks)
struct buffer_address {
    int chunk;     // index of a backend buffer
    size_t offset; // local memory offset within the buffer
};

static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX };

static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) {
    return a.chunk != b.chunk ? a.chunk < b.chunk : a.offset < b.offset;
}

struct free_block {
    size_t offset;
    size_t size;
};

struct tallocr_chunk {
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    int n_free_blocks;
    size_t max_size;
};

struct ggml_dyn_tallocr {
    size_t alignment;
    size_t max_chunk_size;
    struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS];
    int n_chunks;

#ifdef GGML_ALLOCATOR_DEBUG
    struct {
        const struct ggml_tensor * tensor;
        struct buffer_address addr;
    } allocated_tensors[1024];
#endif
};
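
// The dynamic allocator only simulates allocations: it keeps a sorted free list per chunk and
// records each chunk's high-water mark (max_size). No backend memory is touched here; the measured
// sizes are later used to allocate the real backend buffers (see struct vbuffer below).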

static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) {
    GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = chunk->n_free_blocks; i > insert_pos; i--) {
        chunk->free_blocks[i] = chunk->free_blocks[i-1];
    }
    // insert the new block
    chunk->free_blocks[insert_pos].offset = offset;
    chunk->free_blocks[insert_pos].size = size;
    chunk->n_free_blocks++;
}
static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) {
    // shift all elements after idx by 1 to the left, overwriting the element at idx
    // (stop before the last block so we never read past the end of the array)
    for (int i = idx; i < chunk->n_free_blocks - 1; i++) {
        chunk->free_blocks[i] = chunk->free_blocks[i+1];
    }
    chunk->n_free_blocks--;
}

static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) {
    if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) {
        return -1;
    }
    struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk));
    chunk->n_free_blocks = 1;
    chunk->free_blocks[0].offset = 0;
    // available space in a chunk is limited to max_chunk_size, but can be higher if:
    // 1. a single tensor exceeds the maximum, and cannot fit any other way
    // 2. we are running out of chunks
    // backends will either manage to allocate the larger size, or report an error.
    chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size);
    if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) {
        chunk->free_blocks[0].size = SIZE_MAX/2;
    }
    alloc->chunks[alloc->n_chunks] = chunk;
    alloc->n_chunks++;
    return alloc->n_chunks - 1;
}
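
// Illustrative example: with max_chunk_size = 256 MiB, a 300 MiB tensor that cannot fit anywhere
// else still gets a single 300 MiB chunk; the backend either satisfies the oversized request or
// reports an error when the real buffer is allocated.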

#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].tensor == NULL) {
            alloc->allocated_tensors[i].tensor = tensor;
            alloc->allocated_tensors[i].addr = addr;
            return;
        }
    }
    GGML_ABORT("out of allocated_tensors");
}
static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) {
            alloc->allocated_tensors[i].tensor = NULL;
            return;
        }
    }
    GGML_ABORT("tried to free tensor %s not found\n", tensor->name);
}
#endif

static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    int best_fit_chunk = -1;
    int best_fit_block = -1;
    size_t max_avail = 0;

    // find the best fitting free block besides the last block, within any chunk
    for (int c = 0; c < alloc->n_chunks; ++c) {
        struct tallocr_chunk * chunk = alloc->chunks[c];
        size_t best_fit_size = SIZE_MAX;
        for (int i = 0; i < chunk->n_free_blocks - 1; i++) {
            struct free_block * block = &chunk->free_blocks[i];
            max_avail = MAX(max_avail, block->size);
            if (block->size >= size && block->size <= best_fit_size) {
                best_fit_chunk = c;
                best_fit_block = i;
                best_fit_size = block->size;
            }
        }
    }

    if (best_fit_block == -1) {
        // no suitable block found, try the last block (this may grow a chunk's size)
        int64_t best_reuse = INT64_MIN;
        for (int c = 0; c < alloc->n_chunks; ++c) {
            struct tallocr_chunk * chunk = alloc->chunks[c];
            if (chunk->n_free_blocks > 0) {
                struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1];
                max_avail = MAX(max_avail, block->size);
                int64_t reuse_factor = chunk->max_size - block->offset - size;
                // reuse_factor < 0 : amount of extra memory that needs to be allocated
                // reuse_factor = 0 : allocated free space exactly matches tensor size
                // reuse_factor > 0 : superfluous memory that will remain unused
                bool better_reuse = best_reuse < 0 && reuse_factor > best_reuse;
                bool better_fit = reuse_factor >= 0 && reuse_factor < best_reuse;
                if (block->size >= size && (better_reuse || better_fit)) {
                    best_fit_chunk = c;
                    best_fit_block = chunk->n_free_blocks - 1;
                    best_reuse = reuse_factor;
                }
            }
        }
    }

    if (best_fit_block == -1) {
        // none of the existing chunks have enough space left
        best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size);
        best_fit_block = 0;
    }
    if (best_fit_chunk == -1) {
        // since the last chunk always has virtually endless memory, this should never happen
        GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
                __func__, size, max_avail);
        GGML_ABORT("graph allocation: failed to reserve memory");
    }

    struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk];
    struct free_block * block = &chunk->free_blocks[best_fit_block];
    struct buffer_address addr = {.chunk = best_fit_chunk, .offset = block->offset };
    block->offset += size;
    block->size -= size;
    if (block->size == 0) {
        // remove block if empty
        ggml_dyn_tallocr_remove_block(chunk, best_fit_block);
    }

    AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk);

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, addr, tensor);
    size_t cur_max = addr.offset + size;
    if (cur_max > chunk->max_size) {
        // sort allocated_tensors by chunk/offset
        for (int i = 0; i < 1024; i++) {
            for (int j = i + 1; j < 1024; j++) {
                if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) {
                    const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
                    struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr;
                    alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
                    alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr;
                    alloc->allocated_tensors[j].tensor = tmp_tensor;
                    alloc->allocated_tensors[j].addr = tmp_addr;
                }
            }
        }
        GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i].tensor) {
                GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
                    alloc->allocated_tensors[i].addr.chunk,
                    alloc->allocated_tensors[i].addr.offset,
                    alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
                    ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
            }
        }
        GGML_LOG_DEBUG("\n");
    }
#endif

    chunk->max_size = MAX(chunk->max_size, addr.offset + size);

    return addr;

    GGML_UNUSED(tensor);
}

// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size, const struct ggml_tensor * tensor) {
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n",
        __func__, tensor->name, addr.chunk, addr.offset, size, alloc->chunks[addr.chunk]->n_free_blocks);

#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, addr, tensor);
#endif

    struct tallocr_chunk * chunk = alloc->chunks[addr.chunk];

    // see if we can merge with an existing block
    for (int i = 0; i < chunk->n_free_blocks; i++) {
        struct free_block * block = &chunk->free_blocks[i];
        // check if ptr is at the end of the block
        if (block->offset + block->size == addr.offset) {
            block->size += size;
            // check if we can merge with the next block
            if (i < chunk->n_free_blocks - 1) {
                struct free_block * next = &chunk->free_blocks[i+1];
                if (block->offset + block->size == next->offset) {
                    block->size += next->size;
                    ggml_dyn_tallocr_remove_block(chunk, i+1);
                }
            }
            return;
        }
        // check if ptr is at the beginning of the block
        if (addr.offset + size == block->offset) {
            block->offset = addr.offset;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0) {
                struct free_block * prev = &chunk->free_blocks[i-1];
                if (prev->offset + prev->size == block->offset) {
                    prev->size += block->size;
                    ggml_dyn_tallocr_remove_block(chunk, i);
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    ggml_dyn_tallocr_insert_block(chunk, addr.offset, size);

    GGML_UNUSED(tensor);
}

static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) {
        free(alloc->chunks[i]);
        alloc->chunks[i] = NULL;
    }
    alloc->n_chunks = 0;

#ifdef GGML_ALLOCATOR_DEBUG
    for (int i = 0; i < 1024; i++) {
        alloc->allocated_tensors[i].tensor = NULL;
    }
#endif
}

static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) {
    struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr));

    *alloc = (struct ggml_dyn_tallocr) {
        /*.alignment      = */ alignment,
        /*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows
        /*.chunks         = */ {NULL},
        /*.n_chunks       = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {{0}},
#endif
    };

    ggml_dyn_tallocr_reset(alloc);

    return alloc;
}

static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) {
    for (int i = 0; i < alloc->n_chunks; ++i) {
        free(alloc->chunks[i]);
    }
    free(alloc);
}

static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc, int chunk) {
    return chunk < alloc->n_chunks ? alloc->chunks[chunk]->max_size : 0;
}


// virtual buffer with contiguous memory range, split into multiple backend buffers (chunks)

struct vbuffer {
    ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS];
};
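
// A buffer_address {chunk, offset} maps into a vbuffer: `chunk` selects one of the backend buffers
// above and `offset` is the byte position inside it, e.g. {chunk=1, offset=4096} is byte 4096 of
// the second backend buffer.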

static void ggml_vbuffer_free(struct vbuffer * buf) {
    if (buf == NULL) {
        return;
    }
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) {
        ggml_backend_buffer_free(buf->chunks[i]);
    }
    free(buf);
}

static size_t ggml_vbuffer_chunk_size(struct vbuffer * buf, int chunk) {
    return buf->chunks[chunk] ? ggml_backend_buffer_get_size(buf->chunks[chunk]) : 0;
}

static size_t ggml_vbuffer_size(struct vbuffer * buf) {
    size_t size = 0;
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        size += ggml_backend_buffer_get_size(buf->chunks[i]);
    }
    return size;
}

static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) {
    struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer));
    if (buf == NULL) {
        return NULL;
    }

    for (int n = 0; n < talloc->n_chunks; n++) {
        size_t chunk_size = talloc->chunks[n]->max_size;
        buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size);
        if (buf->chunks[n] == NULL) {
            ggml_vbuffer_free(buf);
            return NULL;
        }
        ggml_backend_buffer_set_usage(buf->chunks[n], usage);
    }
    return buf;
}

static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) {
    void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]);
    void * addr = (char *)base + buf_addr.offset;
    ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr);
}

static void ggml_vbuffer_reset(struct vbuffer * buf) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        ggml_backend_buffer_reset(buf->chunks[i]);
    }
}


/////////////////////////////////////

// graph allocator

struct hash_node {
    int n_children;
    int n_views;
    int buffer_id;
    struct buffer_address addr;
    bool allocated;
};
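
// n_children counts the graph nodes that still read this tensor and n_views counts the views that
// still reference it; both are decremented during graph planning, and the tensor's memory is
// returned to the dynamic allocator once both reach zero (graph outputs are never freed).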

struct tensor_alloc {
    int buffer_id;
    struct buffer_address addr;
    size_t size_max; // 0 = pre-allocated, unused, or view
};

struct leaf_alloc {
    struct tensor_alloc leaf;
};

struct node_alloc {
    struct tensor_alloc dst;
    struct tensor_alloc src[GGML_MAX_SRC];
};

struct ggml_gallocr {
    ggml_backend_buffer_type_t * bufts; // [n_buffers]
    struct vbuffer ** buffers; // [n_buffers]
    struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
    int n_buffers;

    struct ggml_hash_set hash_set;
    struct hash_node * hash_values; // [hash_set.size]

    struct node_alloc * node_allocs; // [n_nodes]
    int n_nodes;

    struct leaf_alloc * leaf_allocs; // [n_leafs]
    int n_leafs;
};
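
// Rough usage sketch for the graph allocator (assumes `buft` and a worst-case `graph` were created
// elsewhere; error handling omitted):
//   ggml_gallocr_t galloc = ggml_gallocr_new(buft);
//   ggml_gallocr_reserve(galloc, graph);     // plan assignments and allocate the backend buffers
//   ggml_gallocr_alloc_graph(galloc, graph); // bind tensor data pointers for this graph
//   ...
//   ggml_gallocr_free(galloc);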

ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) {
    ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr));
    GGML_ASSERT(galloc != NULL);

    galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t));
    GGML_ASSERT(galloc->bufts != NULL);

    galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *));
    GGML_ASSERT(galloc->buffers != NULL);

    galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
    GGML_ASSERT(galloc->buf_tallocs != NULL);

    for (int i = 0; i < n_bufs; i++) {
        galloc->bufts[i] = bufts[i];
        galloc->buffers[i] = NULL;

        // check if the same buffer type is used multiple times and reuse the same allocator
        for (int j = 0; j < i; j++) {
            if (bufts[i] == bufts[j]) {
                galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
                break;
            }
        }

        if (galloc->buf_tallocs[i] == NULL) {
            size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
            size_t max_size = ggml_backend_buft_get_max_size(bufts[i]);
            galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size);
        }
    }
    galloc->n_buffers = n_bufs;

    return galloc;
}

ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) {
    return ggml_gallocr_new_n(&buft, 1);
}

void ggml_gallocr_free(ggml_gallocr_t galloc) {
    if (galloc == NULL) {
        return;
    }

    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buffers[j] == galloc->buffers[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_vbuffer_free(galloc->buffers[i]);
            }
        }
        if (galloc->buf_tallocs != NULL) {
            // skip if already freed
            bool freed = false;
            for (int j = 0; j < i; j++) {
                if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                    freed = true;
                    break;
                }
            }
            if (!freed) {
                ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
            }
        }
    }

    ggml_hash_set_free(&galloc->hash_set);
    free(galloc->hash_values);
    free(galloc->bufts);
    free(galloc->buffers);
    free(galloc->buf_tallocs);
    free(galloc->node_allocs);
    free(galloc->leaf_allocs);
    free(galloc);
}

typedef struct ggml_gallocr * ggml_gallocr_t;

static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    size_t i = ggml_hash_find_or_insert(&galloc->hash_set, t);
    return &galloc->hash_values[i];
}

static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return ggml_gallocr_hash_get(galloc, t)->allocated;
}

static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) {
    return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated;
}

// free the extra space at the end if the new tensor is smaller
static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) {
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);

    size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent);
    size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);

    GGML_ASSERT(parent_size >= node_size);

    if (parent_size > node_size) {
        struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id];
        struct buffer_address p_addr = p_hn->addr;
        p_addr.offset += node_size;
        size_t extra_size = parent_size - node_size;
        AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name);
        ggml_dyn_tallocr_free_tensor(p_alloc, p_addr, extra_size, parent);
    }
}
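
// Illustrative example: if a 1 MiB parent is reused in place by a node that only needs 256 KiB,
// the trailing 768 KiB of the parent's allocation is returned to the free list here.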

static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0);
    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);

    if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) {
        hn->allocated = true;
        assert(hn->addr.offset == 0);

        // try to reuse a parent's buffer (inplace)
        if (ggml_op_can_inplace(node->op)) {
            for (int i = 0; i < GGML_MAX_SRC; i++) {
                struct ggml_tensor * parent = node->src[i];
                if (parent == NULL) {
                    continue;
                }

                // if the node's data is external, then we cannot re-use it
                if (!ggml_gallocr_is_own(galloc, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
                    continue;
                }

                // outputs cannot be reused
                if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) {
                    AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name);
                    continue;
                }

                if (!ggml_are_same_layout(node, parent)) {
                    AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name);
                    continue;
                }

                struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
                if (p_hn->n_children == 1 && p_hn->n_views == 0) {
                    if (ggml_is_view(parent)) {
                        struct ggml_tensor * view_src = parent->view_src;
                        struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                        if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                            AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                            assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset);
                            hn->buffer_id = p_hn->buffer_id;
                            hn->addr = p_hn->addr;
                            p_hn->allocated = false; // avoid freeing the parent
                            view_src_hn->allocated = false;
                            ggml_gallocr_free_extra_space(galloc, node, view_src);
                            return;
                        }
                    } else {
                        AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                        hn->buffer_id = p_hn->buffer_id;
                        hn->addr = p_hn->addr;
                        p_hn->allocated = false; // avoid freeing the parent
                        ggml_gallocr_free_extra_space(galloc, node, parent);
                        return;
                    }
                }
            }
        }
        // allocate tensor from the buffer
        struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
        ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
        size_t size = ggml_backend_buft_get_alloc_size(buft, node);
        hn->buffer_id = buffer_id;
        hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node);
    }
}

static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
    // graph outputs are never freed
    if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
        AT_PRINTF("not freeing output %s\n", node->name);
        return;
    }

    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
    int buffer_id = hn->buffer_id;
    struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
    ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
    size_t size = ggml_backend_buft_get_alloc_size(buft, node);
    ggml_dyn_tallocr_free_tensor(alloc, hn->addr, size, node);
    hn->allocated = false;
}

static int get_node_buffer_id(const int * node_buffer_ids, int i) {
    return node_buffer_ids ? node_buffer_ids[i] : 0;
}

static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    // clear hash tables
    ggml_hash_set_reset(&galloc->hash_set);
    memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size);

    // allocate leafs
    // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i));
    }

    // count number of children and views
    // allocate other graph inputs and leafs first to avoid overwriting them
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];

        // TODO: better way to add external dependencies
        // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to
        // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node
        // itself is never used and should not be considered a dependency
        if (ggml_is_view(node) && node->op != GGML_OP_NONE) {
            struct ggml_tensor * view_src = node->view_src;
            ggml_gallocr_hash_get(galloc, view_src)->n_views += 1;
        }

        if (node->flags & GGML_TENSOR_FLAG_INPUT) {
            ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i));
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }

            ggml_gallocr_hash_get(galloc, src)->n_children += 1;

            // allocate explicit inputs
            if (src->flags & GGML_TENSOR_FLAG_INPUT) {
                ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i));
            }
        }
    }

    // allocate tensors
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int buffer_id = get_node_buffer_id(node_buffer_ids, i);

        // allocate parents (only leafs need to be allocated at this point)
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            ggml_gallocr_allocate_node(galloc, parent, buffer_id);
        }

        // allocate node
        ggml_gallocr_allocate_node(galloc, node, buffer_id);

        AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name);
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            AT_PRINTF("%s", parent->name);
            if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                AT_PRINTF(", ");
            }
        }
        AT_PRINTF("\n");

        // update parents
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * parent = node->src[j];
            if (parent == NULL) {
                continue;
            }
            struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
            p_hn->n_children -= 1;

            AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n",
                parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated);

            if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                if (ggml_is_view(parent)) {
                    struct ggml_tensor * view_src = parent->view_src;
                    struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
                    view_src_hn->n_views -= 1;
                    AT_PRINTF("view_src %s: %d children, %d views\n",
                        view_src->name, view_src_hn->n_children, view_src_hn->n_views);
                    if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
                        ggml_gallocr_free_node(galloc, view_src);
                    }
                }
                else if (p_hn->allocated) {
                    ggml_gallocr_free_node(galloc, parent);
                }
            }
            AT_PRINTF("\n");
        }
    }
}

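// ggml_gallocr_reserve_n runs the planning pass above on the given graph, records the resulting
// per-node and per-leaf assignments, and then (re)allocates the backend buffers to the measured
// chunk sizes. ggml_gallocr_alloc_graph later replays these assignments without re-planning.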
bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) {
    size_t min_hash_size = graph->n_nodes + graph->n_leafs;
    // add 25% margin to avoid hash collisions
    min_hash_size += min_hash_size / 4;

    // initialize hash table
    if (galloc->hash_set.size < min_hash_size) {
        ggml_hash_set_free(&galloc->hash_set);
        galloc->hash_set = ggml_hash_set_new(min_hash_size);
        GGML_ASSERT(galloc->hash_set.keys != NULL);

        free(galloc->hash_values);
        galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
        GGML_ASSERT(galloc->hash_values != NULL);
    }

    // reset allocators
    for (int i = 0; i < galloc->n_buffers; i++) {
        ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]);
    }

    // allocate in hash table
    ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids);

    // set the node_allocs from the hash table
    if (galloc->n_nodes < graph->n_nodes) {
        free(galloc->node_allocs);
        galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc));
        GGML_ASSERT(galloc->node_allocs != NULL);
    }
    galloc->n_nodes = graph->n_nodes;
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        if (node->view_src || node->data) {
            node_alloc->dst.buffer_id = -1;
            node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID;
            node_alloc->dst.size_max = 0;
        } else {
            struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
            node_alloc->dst.buffer_id = hn->buffer_id;
            node_alloc->dst.addr = hn->addr;
            node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (!src || src->view_src || src->data) {
                node_alloc->src[j].buffer_id = -1;
                node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID;
                node_alloc->src[j].size_max = 0;
            } else {
                struct hash_node * hn = ggml_gallocr_hash_get(galloc, src);
                node_alloc->src[j].buffer_id = hn->buffer_id;
                node_alloc->src[j].addr = hn->addr;
                node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
            }
        }
    }
    if (galloc->n_leafs < graph->n_leafs) {
        free(galloc->leaf_allocs);
        galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0]));
        GGML_ASSERT(galloc->leaf_allocs != NULL);
    }
    galloc->n_leafs = graph->n_leafs;
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
        if (leaf->view_src || leaf->data) {
            galloc->leaf_allocs[i].leaf.buffer_id = -1;
            galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID;
            galloc->leaf_allocs[i].leaf.size_max = 0;
        } else {
            galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
            galloc->leaf_allocs[i].leaf.addr = hn->addr;
            galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
        }
    }

    // reallocate buffers if needed
    for (int i = 0; i < galloc->n_buffers; i++) {
        // if the buffer type is used multiple times, we reuse the same buffer
        for (int j = 0; j < i; j++) {
            if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
                galloc->buffers[i] = galloc->buffers[j];
                break;
            }
        }

        // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
        bool realloc = galloc->buffers[i] == NULL;
        size_t new_size = 0;
        for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) {
            size_t cur_chunk_size = galloc->buffers[i] ? ggml_vbuffer_chunk_size(galloc->buffers[i], c) : 0;
            size_t new_chunk_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i], c);
            new_size += new_chunk_size;
            if (new_chunk_size > cur_chunk_size) {
                realloc = true;
            }
        }
        if (realloc) {
#ifndef NDEBUG
            size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
            GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
#endif

            ggml_vbuffer_free(galloc->buffers[i]);
            galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
            if (galloc->buffers[i] == NULL) {
                GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
                return false;
            }
        }
    }

    return true;
}

bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
}

static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
    int buffer_id = tensor_alloc->buffer_id;
    assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);

    if (tensor->view_src != NULL) {
        if (tensor->buffer == NULL) {
            assert(tensor_alloc->addr.offset == SIZE_MAX);
            if (tensor->view_src->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
            ggml_backend_view_init(tensor);
        }
    } else {
        if (tensor->data == NULL) {
            assert(tensor_alloc->addr.offset != SIZE_MAX);
            assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);
            ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr);
        } else {
            if (tensor->buffer == NULL) {
                // this tensor was allocated without ggml-backend
                return;
            }
        }
    }
}

static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
    size_t node_size = 0;
    if (!node->data && !node->view_src) {
        // If we previously had data but don't now then reallocate
        if (talloc->buffer_id < 0) {
            return false;
        }
        node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
    }
    return talloc->size_max >= node_size;
}

static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (galloc->n_nodes != graph->n_nodes) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__);
#endif
        return true;
    }

    if (galloc->n_leafs != graph->n_leafs) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__);
#endif
        return true;
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];

        if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name);
#endif
            return true;
        }

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
#ifndef NDEBUG
                GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
#endif
                return true;
            }
        }
    }

    return false;
}

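// ggml_gallocr_alloc_graph re-reserves automatically only when a single buffer is in use; graphs
// spread across multiple buffers must be re-reserved explicitly with ggml_gallocr_reserve_n so that
// the correct buffer ids are supplied.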
bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) {
    if (ggml_gallocr_needs_realloc(galloc, graph)) {
        if (galloc->n_buffers == 1) {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__);
#endif
            if (!ggml_gallocr_reserve(galloc, graph)) {
                return false;
            }
        } else {
#ifndef NDEBUG
            GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__);
#endif
            return false;
        }
    }

    // reset buffers
    for (int i = 0; i < galloc->n_buffers; i++) {
        if (galloc->buffers[i] != NULL) {
            ggml_vbuffer_reset(galloc->buffers[i]);
        }
    }

    // allocate the graph tensors from the previous assignments
    // leafs
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
        ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
    }
    // nodes
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        struct node_alloc * node_alloc = &galloc->node_allocs[i];
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
        }
        ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
    }

    return true;
}

size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
    GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);

    if (galloc->buffers[buffer_id] == NULL) {
        return 0;
    }

    for (int i = 0; i < buffer_id; i++) {
        if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
            // this buffer is the same as a previous one due to the same buffer type being used multiple times
            // only return the buffer size the first time it appears to avoid double counting
            return 0;
        }
    }

    return ggml_vbuffer_size(galloc->buffers[buffer_id]);
}

// utils

static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) {
    for (size_t i = 0; i < *n_buffers; i++) {
        ggml_backend_buffer_free((*buffers)[i]);
    }
    free(*buffers);
}

static bool alloc_tensor_range(struct ggml_context * ctx,
        struct ggml_tensor * first, struct ggml_tensor * last,
        ggml_backend_buffer_type_t buft, size_t size,
        ggml_backend_buffer_t ** buffers, size_t * n_buffers) {

    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
    if (buffer == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size);
        free_buffers(buffers, n_buffers);
        return false;
    }

    *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1));
    (*buffers)[(*n_buffers)++] = buffer;

    struct ggml_tallocr tallocr = ggml_tallocr_new(buffer);

    for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) {
        enum ggml_status status = GGML_STATUS_SUCCESS;
        if (t->data == NULL) {
            if (t->view_src == NULL) {
                status = ggml_tallocr_alloc(&tallocr, t);
            } else if (t->buffer == NULL) {
                status = ggml_backend_view_init(t);
            }
        } else {
            if (t->view_src != NULL && t->buffer == NULL) {
                // view of a pre-allocated tensor
                status = ggml_backend_view_init(t);
            }
        }
        if (status != GGML_STATUS_SUCCESS) {
            GGML_LOG_ERROR("%s: failed to initialize tensor %s\n", __func__, t->name);
            free_buffers(buffers, n_buffers);
            return false;
        }
    }

    return true;
}

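// ggml_backend_alloc_ctx_tensors_from_buft below walks the tensors of a context, groups them into
// ranges that fit the buffer type's max_size, allocates one backend buffer per range with
// alloc_tensor_range, and merges the results into a single multi-buffer when more than one is needed.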
ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(ggml_get_no_alloc(ctx) == true);

    size_t alignment = ggml_backend_buft_get_alignment(buft);
    size_t max_size = ggml_backend_buft_get_max_size(buft);

    ggml_backend_buffer_t * buffers = NULL;
    size_t n_buffers = 0;

    size_t cur_buf_size = 0;
    struct ggml_tensor * first = ggml_get_first_tensor(ctx);
    for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        size_t this_size = 0;
        if (t->data == NULL && t->view_src == NULL) {
            this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
        }

        if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
            // allocate tensors in the current buffer
            if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
                return NULL;
            }
            first = t;
            cur_buf_size = this_size;
        } else {
            cur_buf_size += this_size;
        }
    }

    // allocate remaining tensors
    if (cur_buf_size > 0) {
        if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) {
            return NULL;
        }
    }

    if (n_buffers == 0) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__);
#endif
        return NULL;
    }

    ggml_backend_buffer_t buffer;
    if (n_buffers == 1) {
        buffer = buffers[0];
    } else {
        buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers);
    }
    free(buffers);
    return buffer;
}

ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) {
    return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend));
}
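
// Rough usage sketch for the helpers above (assumes a no_alloc context `ctx` and a backend
// `backend` created elsewhere; the tensor shape is illustrative):
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4096, 4096);
//   ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//   // ... use the tensors, then release the buffer when the context is no longer needed:
//   ggml_backend_buffer_free(buf);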