/**************************************************************************/
/*  pool_allocator.cpp                                                    */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#include "pool_allocator.h"

#include "core/error/error_macros.h"
#include "core/os/memory.h"
#include "core/os/os.h"
#include "core/string/print_string.h"

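// Moves a chunk's payload to m_to_pos with memmove() (source and destination
// may overlap) and updates the entry's position. Wrapped in do/while (0) so
// the macro expands as a single statement inside if/else bodies.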
#define COMPACT_CHUNK(m_entry, m_to_pos)                      \
	do {                                                      \
		void *_dst = &((unsigned char *)pool)[m_to_pos];      \
		void *_src = &((unsigned char *)pool)[(m_entry).pos]; \
		memmove(_dst, _src, aligned((m_entry).len));          \
		(m_entry).pos = m_to_pos;                             \
	} while (0)

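// No-op by default: the base PoolAllocator is single-threaded. A thread-aware
// subclass can override these hooks to take a real mutex.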
void PoolAllocator::mt_lock() const {
}

void PoolAllocator::mt_unlock() const {
}

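// Linear scan for an unused slot in entry_array; a slot with len == 0 is free.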
bool PoolAllocator::get_free_entry(EntryArrayPos *p_pos) {
	if (entry_count == entry_max) {
		return false;
	}

	for (int i = 0; i < entry_max; i++) {
		if (entry_array[i].len == 0) {
			*p_pos = i;
			return true;
		}
	}

	ERR_PRINT("Out of memory chunks!");

	return false;
}

/**
 * Find a hole using first-fit over the position-sorted entries.
 * @param p_pos Upon return, the hole lies before the entry at this index; if *p_pos == entry_count, allocate at the end of the pool.
 * @param p_for_size Required hole size.
 * @return true if a hole was found, false otherwise.
 */
bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {
	/* position where previous entry ends. Defaults to zero (beginning of pool) */

	int prev_entry_end_pos = 0;

	for (int i = 0; i < entry_count; i++) {
		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to previous entry */

		int hole_size = entry.pos - prev_entry_end_pos;

		/* determine if what we want fits in that hole */
		if (hole_size >= p_for_size) {
			*p_pos = i;
			return true;
		}

		/* prepare for next one */
		prev_entry_end_pos = entry_end(entry);
	}

	/* No holes between entries, check at the end. */

	if ((pool_size - prev_entry_end_pos) >= p_for_size) {
		*p_pos = entry_count;
		return true;
	}

	return false;
}

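// Slides every unlocked chunk in [0, p_up_to) down towards the start of the
// pool, closing the holes left by freed chunks. entry_indices is kept sorted
// by position, so a single left-to-right pass is enough.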
void PoolAllocator::compact(int p_up_to) {
	uint32_t prev_entry_end_pos = 0;

	if (p_up_to < 0) {
		p_up_to = entry_count;
	}
	for (int i = 0; i < p_up_to; i++) {
		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size to previous entry */

		int hole_size = entry.pos - prev_entry_end_pos;

		/* if we can compact, do it */
		if (hole_size > 0 && !entry.lock) {
			COMPACT_CHUNK(entry, prev_entry_end_pos);
		}

		/* prepare for next one */
		prev_entry_end_pos = entry_end(entry);
	}
}

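// Mirror of compact(): slides unlocked chunks from p_from upwards towards the
// end of the pool, so the accumulated hole ends up directly before the entry
// at p_from.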
void PoolAllocator::compact_up(int p_from) {
	uint32_t next_entry_end_pos = pool_size; // - static_area_size;

	for (int i = entry_count - 1; i >= p_from; i--) {
		Entry &entry = entry_array[entry_indices[i]];

		/* determine hole size for next entry */

		int hole_size = next_entry_end_pos - (entry.pos + aligned(entry.len));

		/* if we can compact, do it */
		if (hole_size > 0 && !entry.lock) {
			COMPACT_CHUNK(entry, (next_entry_end_pos - aligned(entry.len)));
		}

		/* prepare for next one */
		next_entry_end_pos = entry.pos;
	}
}

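// Maps an Entry back to its position in entry_indices by linear search.
// Returns false if the entry is not currently indexed.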
bool PoolAllocator::find_entry_index(EntryIndicesPos *p_map_pos, const Entry *p_entry) {
	EntryArrayPos entry_pos = entry_max;

	for (int i = 0; i < entry_count; i++) {
		if (&entry_array[entry_indices[i]] == p_entry) {
			entry_pos = i;
			break;
		}
	}

	if (entry_pos == entry_max) {
		return false;
	}

	*p_map_pos = entry_pos;
	return true;
}

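// Allocates p_size bytes and returns an opaque ID that encodes both the slot
// in entry_array and a rolling check value:
//
//   ID = (entry_array index << CHECK_BITS) | (check_count & CHECK_MASK)
//
// The check bits let get_entry() reject stale IDs whose slot has since been
// reused. If no hole fits, the pool is compacted once and the search retried.
// Note that free_mem_peak records the lowest free_mem seen, i.e. peak usage.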
PoolAllocator::ID PoolAllocator::alloc(int p_size) {
	ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
	ERR_FAIL_COND_V(p_size > free_mem, POOL_ALLOCATOR_INVALID_ID);

	mt_lock();

	if (entry_count == entry_max) {
		mt_unlock();
		ERR_PRINT("Entry limit reached (entry_count == entry_max).");
		return POOL_ALLOCATOR_INVALID_ID;
	}

	int size_to_alloc = aligned(p_size);

	EntryIndicesPos new_entry_indices_pos;

	if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
		/* No hole could be found, try compacting mem */
		compact();
		/* Then search again */

		if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
			mt_unlock();
			ERR_FAIL_V_MSG(POOL_ALLOCATOR_INVALID_ID, "Memory can't be compacted further.");
		}
	}

	EntryArrayPos new_entry_array_pos;

	bool found_free_entry = get_free_entry(&new_entry_array_pos);

	if (!found_free_entry) {
		mt_unlock();
		ERR_FAIL_V_MSG(POOL_ALLOCATOR_INVALID_ID, "No free entry found in PoolAllocator.");
	}

	/* move all entry indices up, make room for this one */
	for (int i = entry_count; i > new_entry_indices_pos; i--) {
		entry_indices[i] = entry_indices[i - 1];
	}

	entry_indices[new_entry_indices_pos] = new_entry_array_pos;

	entry_count++;

	Entry &entry = entry_array[entry_indices[new_entry_indices_pos]];

	entry.len = p_size;
	entry.pos = (new_entry_indices_pos == 0) ? 0 : entry_end(entry_array[entry_indices[new_entry_indices_pos - 1]]); // Allocate at the beginning of the pool, or right after the previous entry.
	entry.lock = 0;
	entry.check = (check_count++) & CHECK_MASK;
	free_mem -= size_to_alloc;
	if (free_mem < free_mem_peak) {
		free_mem_peak = free_mem;
	}

	ID retval = (entry_indices[new_entry_indices_pos] << CHECK_BITS) | entry.check;
	mt_unlock();

	//ERR_FAIL_COND_V( (uintptr_t)get(retval)%align != 0, retval );

	return retval;
}

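// Decodes an ID produced by alloc(): the low CHECK_BITS must match the
// entry's stored check value, and the slot must still be in use (len != 0).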
PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) {
	unsigned int check = p_mem & CHECK_MASK;
	int entry = p_mem >> CHECK_BITS;
	ERR_FAIL_INDEX_V(entry, entry_max, nullptr);
	ERR_FAIL_COND_V(entry_array[entry].check != check, nullptr);
	ERR_FAIL_COND_V(entry_array[entry].len == 0, nullptr);

	return &entry_array[entry];
}

const PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) const {
	unsigned int check = p_mem & CHECK_MASK;
	int entry = p_mem >> CHECK_BITS;
	ERR_FAIL_INDEX_V(entry, entry_max, nullptr);
	ERR_FAIL_COND_V(entry_array[entry].check != check, nullptr);
	ERR_FAIL_COND_V(entry_array[entry].len == 0, nullptr);

	return &entry_array[entry];
}

void PoolAllocator::free(ID p_mem) {
	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("Invalid ID in PoolAllocator::free().");
		return;
	}
	if (e->lock) {
		mt_unlock();
		ERR_PRINT("Attempted to free a locked entry.");
		return;
	}

	EntryIndicesPos entry_indices_pos;

	bool index_found = find_entry_index(&entry_indices_pos, e);
	if (!index_found) {
		mt_unlock();
		ERR_FAIL_COND(!index_found);
	}

	for (int i = entry_indices_pos; i < (entry_count - 1); i++) {
		entry_indices[i] = entry_indices[i + 1];
	}

	entry_count--;
	free_mem += aligned(e->len);
	e->clear();
	mt_unlock();
}

int PoolAllocator::get_size(ID p_mem) const {
	int size;
	mt_lock();

	const Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("Invalid ID in PoolAllocator::get_size().");
		return 0;
	}

	size = e->len;

	mt_unlock();

	return size;
}

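// Resizes an allocation in place, trying progressively harder:
//   1. If the aligned size is unchanged or shrinks, just adjust the entry.
//   2. If the grown chunk fits before the next entry, take the space as-is.
//   3. Otherwise compact the entries below, then the entries above, retaking
//      the measurement after each pass before giving up with
//      ERR_OUT_OF_MEMORY.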
Error PoolAllocator::resize(ID p_mem, int p_new_size) {
	mt_lock();
	Entry *e = get_entry(p_mem);

	if (!e) {
		mt_unlock();
		ERR_FAIL_NULL_V(e, ERR_INVALID_PARAMETER);
	}

	if (needs_locking && e->lock) {
		mt_unlock();
		ERR_FAIL_COND_V(e->lock, ERR_ALREADY_IN_USE);
	}

	uint32_t alloc_size = aligned(p_new_size);

	if ((uint32_t)aligned(e->len) == alloc_size) {
		e->len = p_new_size;
		mt_unlock();
		return OK;
	} else if (e->len > (uint32_t)p_new_size) {
		free_mem += aligned(e->len);
		free_mem -= alloc_size;
		e->len = p_new_size;
		mt_unlock();
		return OK;
	}

	int _free = free_mem; // - static_area_size;

	if (uint32_t(_free + aligned(e->len)) < alloc_size) {
		mt_unlock();
		ERR_FAIL_V(ERR_OUT_OF_MEMORY);
	}

	EntryIndicesPos entry_indices_pos;

	bool index_found = find_entry_index(&entry_indices_pos, e);

	if (!index_found) {
		mt_unlock();
		ERR_FAIL_COND_V(!index_found, ERR_BUG);
	}

	// No need to move stuff around if it fits before the next block.
	uint32_t next_pos;
	if (entry_indices_pos + 1 == entry_count) {
		next_pos = pool_size; // - static_area_size;
	} else {
		next_pos = entry_array[entry_indices[entry_indices_pos + 1]].pos;
	}

	if ((next_pos - e->pos) >= alloc_size) {
		free_mem += aligned(e->len);
		e->len = p_new_size;
		free_mem -= alloc_size;
		mt_unlock();
		return OK;
	}

	// It doesn't fit; compact the entries BEFORE the current index (make room behind).

	compact(entry_indices_pos + 1);

	if ((next_pos - e->pos) >= alloc_size) {
		// Now it fits! Hooray!
		free_mem += aligned(e->len);
		e->len = p_new_size;
		free_mem -= alloc_size;
		mt_unlock();
		if (free_mem < free_mem_peak) {
			free_mem_peak = free_mem;
		}
		return OK;
	}

	// STILL doesn't fit; compact the entries AFTER the current index (make room after).

	compact_up(entry_indices_pos + 1);

	if (entry_indices_pos + 1 < entry_count && (entry_array[entry_indices[entry_indices_pos + 1]].pos - e->pos) >= alloc_size) {
		// Now it fits! Hooray!
		free_mem += aligned(e->len);
		e->len = p_new_size;
		free_mem -= alloc_size;
		mt_unlock();
		if (free_mem < free_mem_peak) {
			free_mem_peak = free_mem;
		}
		return OK;
	}

	mt_unlock();
	ERR_FAIL_V(ERR_OUT_OF_MEMORY);
}

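// Locking pins a chunk: compact()/compact_up() never move an entry whose
// lock count is non-zero, so a raw pointer from get() stays valid only while
// the entry is locked. Locks nest; unlock() decrements the count.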
Error PoolAllocator::lock(ID p_mem) {
	if (!needs_locking) {
		return OK;
	}
	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("Invalid ID in PoolAllocator::lock().");
		return ERR_INVALID_PARAMETER;
	}
	e->lock++;
	mt_unlock();
	return OK;
}

bool PoolAllocator::is_locked(ID p_mem) const {
	if (!needs_locking) {
		return false;
	}

	mt_lock();
	const Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("Invalid ID in PoolAllocator::is_locked().");
		return false;
	}
	bool locked = e->lock;
	mt_unlock();
	return locked;
}

const void *PoolAllocator::get(ID p_mem) const {
	if (!needs_locking) {
		const Entry *e = get_entry(p_mem);
		ERR_FAIL_NULL_V(e, nullptr);
		return &pool[e->pos];
	}

	mt_lock();
	const Entry *e = get_entry(p_mem);

	if (!e) {
		mt_unlock();
		ERR_FAIL_NULL_V(e, nullptr);
	}
	if (e->lock == 0) {
		mt_unlock();
		ERR_PRINT("Entry must be locked before get() (e->lock == 0).");
		return nullptr;
	}

	if ((int)e->pos >= pool_size) {
		mt_unlock();
		ERR_PRINT("Entry position out of bounds (e->pos >= pool_size).");
		return nullptr;
	}
	const void *ptr = &pool[e->pos];

	mt_unlock();

	return ptr;
}

void *PoolAllocator::get(ID p_mem) {
	if (!needs_locking) {
		Entry *e = get_entry(p_mem);
		ERR_FAIL_NULL_V(e, nullptr);
		return &pool[e->pos];
	}

	mt_lock();
	Entry *e = get_entry(p_mem);

	if (!e) {
		mt_unlock();
		ERR_FAIL_NULL_V(e, nullptr);
	}
	if (e->lock == 0) {
		mt_unlock();
		ERR_PRINT("Entry must be locked before get() (e->lock == 0).");
		return nullptr;
	}

	if ((int)e->pos >= pool_size) {
		mt_unlock();
		ERR_PRINT("Entry position out of bounds (e->pos >= pool_size).");
		return nullptr;
	}
	void *ptr = &pool[e->pos];

	mt_unlock();

	return ptr;
}

void PoolAllocator::unlock(ID p_mem) {
	if (!needs_locking) {
		return;
	}
	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_FAIL_NULL(e);
	}
	if (e->lock == 0) {
		mt_unlock();
		ERR_PRINT("Attempted to unlock an entry that is not locked (e->lock == 0).");
		return;
	}
	e->lock--;
	mt_unlock();
}

int PoolAllocator::get_used_mem() const {
	return pool_size - free_mem;
}

int PoolAllocator::get_free_peak() {
	return free_mem_peak;
}

int PoolAllocator::get_free_mem() {
	return free_mem;
}

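// Initializes the bookkeeping structures over a caller-provided buffer.
// create_pool() itself never takes ownership of p_mem; whether the buffer is
// freed in the destructor depends on which constructor set mem_ptr.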
void PoolAllocator::create_pool(void *p_mem, int p_size, int p_max_entries) {
	pool = (uint8_t *)p_mem;
	pool_size = p_size;

	entry_array = memnew_arr(Entry, p_max_entries);
	entry_indices = memnew_arr(int, p_max_entries);
	entry_max = p_max_entries;
	entry_count = 0;

	free_mem = p_size;
	free_mem_peak = p_size;

	check_count = 0;
}

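// A minimal usage sketch (an illustration, not code from this file; the
// 64 KiB size and entry count are arbitrary):
//
//   PoolAllocator pool(64 * 1024, /* p_needs_locking */ true, /* p_max_entries */ 512);
//   PoolAllocator::ID id = pool.alloc(256);
//   if (id != POOL_ALLOCATOR_INVALID_ID) {
//       if (pool.lock(id) == OK) {
//           void *ptr = pool.get(id); // Safe while locked: compaction never moves locked chunks.
//           // ... use up to 256 bytes at ptr ...
//           pool.unlock(id);
//       }
//       pool.free(id); // Freeing just marks the hole; compaction happens lazily.
//   }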
PoolAllocator::PoolAllocator(int p_size, bool p_needs_locking, int p_max_entries) {
	mem_ptr = memalloc(p_size);
	ERR_FAIL_NULL(mem_ptr);
	align = 1;
	create_pool(mem_ptr, p_size, p_max_entries);
	needs_locking = p_needs_locking;
}

PoolAllocator::PoolAllocator(void *p_mem, int p_size, int p_align, bool p_needs_locking, int p_max_entries) {
	if (p_align > 1) {
		uint8_t *mem8 = (uint8_t *)p_mem;
		uint64_t ofs = (uint64_t)mem8;
		if (ofs % p_align) {
			int dif = p_align - (ofs % p_align);
			mem8 += dif;
			p_size -= dif;
			p_mem = (void *)mem8;
		}
	}

	create_pool(p_mem, p_size, p_max_entries);
	needs_locking = p_needs_locking;
	align = p_align;
	mem_ptr = nullptr;
}

PoolAllocator::PoolAllocator(int p_align, int p_size, bool p_needs_locking, int p_max_entries) {
	ERR_FAIL_COND(p_align < 1);
	mem_ptr = Memory::alloc_static(p_size + p_align, true);
	uint8_t *mem8 = (uint8_t *)mem_ptr;
	uint64_t ofs = (uint64_t)mem8;
	if (ofs % p_align) {
		mem8 += p_align - (ofs % p_align);
	}
	create_pool(mem8, p_size, p_max_entries);
	needs_locking = p_needs_locking;
	align = p_align;
}

PoolAllocator::~PoolAllocator() {
	if (mem_ptr) {
		memfree(mem_ptr);
	}

	memdelete_arr(entry_array);
	memdelete_arr(entry_indices);
}