1/**************************************************************************/
2/* paged_array.h */
3/**************************************************************************/
4/* This file is part of: */
5/* GODOT ENGINE */
6/* https://godotengine.org */
7/**************************************************************************/
8/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10/* */
11/* Permission is hereby granted, free of charge, to any person obtaining */
12/* a copy of this software and associated documentation files (the */
13/* "Software"), to deal in the Software without restriction, including */
14/* without limitation the rights to use, copy, modify, merge, publish, */
15/* distribute, sublicense, and/or sell copies of the Software, and to */
16/* permit persons to whom the Software is furnished to do so, subject to */
17/* the following conditions: */
18/* */
19/* The above copyright notice and this permission notice shall be */
20/* included in all copies or substantial portions of the Software. */
21/* */
22/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29/**************************************************************************/
30
31#ifndef PAGED_ARRAY_H
32#define PAGED_ARRAY_H
33
34#include "core/os/memory.h"
35#include "core/os/spin_lock.h"
36#include "core/typedefs.h"
37
38#include <type_traits>
39
40// PagedArray is used mainly for filling a very large array from multiple threads efficiently and without causing major fragmentation
41
// PagedArrayPool manages central page allocation in a thread-safe manner
43
template <class T>
class PagedArrayPool {
	T **page_pool = nullptr; // Backing storage: one heap-allocated page of `page_size` elements per entry.
	uint32_t pages_allocated = 0; // Total pages ever allocated; both bookkeeping arrays are sized to this.

	uint32_t *available_page_pool = nullptr; // Stack of free page ids; the top is at index pages_available - 1.
	uint32_t pages_available = 0; // Number of free page ids currently on the stack.

	uint32_t page_size = 0; // Elements per page; always a power of two (see configure()).
	SpinLock spin_lock; // Guards all of the bookkeeping above.

public:
	// Returns the id of a free page, growing the pool by one page when none
	// are available. Thread-safe. The id stays valid until free_page().
	uint32_t alloc_page() {
		spin_lock.lock();
		if (unlikely(pages_available == 0)) {
			// No free page: allocate a new one and push its id on the free stack.
			uint32_t pages_used = pages_allocated;

			pages_allocated++;
			page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated);
			available_page_pool = (uint32_t *)memrealloc(available_page_pool, sizeof(uint32_t) * pages_allocated);

			page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size);
			available_page_pool[0] = pages_used; // pages_available is 0 in this branch, so index 0 is the stack top.

			pages_available++;
		}

		// Pop the top id off the free stack.
		pages_available--;
		uint32_t page = available_page_pool[pages_available];
		spin_lock.unlock();

		return page;
	}
	// Returns the raw storage of a page id obtained from alloc_page().
	// NOTE(review): not locked — assumes page_pool is not being grown by a
	// concurrent alloc_page() while this runs; confirm against callers.
	T *get_page(uint32_t p_page_id) {
		return page_pool[p_page_id];
	}

	// Returns a page to the free stack for reuse. Thread-safe. The page's
	// memory stays allocated until reset().
	void free_page(uint32_t p_page_id) {
		spin_lock.lock();
		available_page_pool[pages_available] = p_page_id;
		pages_available++;
		spin_lock.unlock();
	}

	// log2 of the page size; lets PagedArray turn division into a shift.
	uint32_t get_page_size_shift() const {
		return get_shift_from_power_of_2(page_size);
	}

	// page_size - 1; lets PagedArray turn modulo into a bit mask (valid
	// because page_size is a power of two).
	uint32_t get_page_size_mask() const {
		return page_size - 1;
	}

	// Frees every page plus the bookkeeping arrays. Fails if any page is
	// still in use (allocated but not yet returned via free_page()).
	void reset() {
		ERR_FAIL_COND(pages_available < pages_allocated);
		if (pages_allocated) {
			for (uint32_t i = 0; i < pages_allocated; i++) {
				memfree(page_pool[i]);
			}
			memfree(page_pool);
			memfree(available_page_pool);
			page_pool = nullptr;
			available_page_pool = nullptr;
			pages_allocated = 0;
			pages_available = 0;
		}
	}
	// True once a nonzero page size has been set via configure().
	bool is_configured() const {
		return page_size > 0;
	}

	// Sets the page size, rounded up to the nearest power of two. May only be
	// called before any page has been allocated.
	void configure(uint32_t p_page_size) {
		ERR_FAIL_COND(page_pool != nullptr); // Safety check.
		ERR_FAIL_COND(p_page_size == 0);
		page_size = nearest_power_of_2_templated(p_page_size);
	}

	PagedArrayPool(uint32_t p_page_size = 4096) { // Power of 2 recommended because of alignment with OS page sizes. Even if the element is bigger, it's still a multiple and gets a rounded amount of pages.
		configure(p_page_size);
	}

	~PagedArrayPool() {
		ERR_FAIL_COND_MSG(pages_available < pages_allocated, "Pages in use exist at exit in PagedArrayPool");
		reset();
	}
};
129
// PagedArray is a local array that is optimized to grow in place, then be cleared often.
131// It does so by allocating pages from a PagedArrayPool.
132// It is safe to use multiple PagedArrays from different threads, sharing a single PagedArrayPool
133
template <class T>
class PagedArray {
	PagedArrayPool<T> *page_pool = nullptr; // Shared pool pages are borrowed from; set via set_page_pool() before use.

	T **page_data = nullptr; // Storage pointer of each page in use, indexed by local page number.
	uint32_t *page_ids = nullptr; // Pool ids of the pages in page_data, needed to return them to the pool.
	uint32_t max_pages_used = 0; // Current capacity (in entries) of page_data / page_ids.
	uint32_t page_size_shift = 0; // log2 of the page size: index >> shift yields the page number.
	uint32_t page_size_mask = 0; // Page size minus one: index & mask yields the offset within a page.
	uint64_t count = 0; // Number of elements currently stored.

	// Number of pages needed to hold `count` elements (ceil division via shift).
	_FORCE_INLINE_ uint32_t _get_pages_in_use() const {
		if (count == 0) {
			return 0;
		} else {
			return ((count - 1) >> page_size_shift) + 1;
		}
	}

	// Doubles the capacity of the page_data / page_ids arrays.
	void _grow_page_array() {
		//no more room in the page array to put the new page, make room
		if (max_pages_used == 0) {
			max_pages_used = 1;
		} else {
			max_pages_used *= 2; // increase in powers of 2 to keep allocations to minimum
		}
		page_data = (T **)memrealloc(page_data, sizeof(T *) * max_pages_used);
		page_ids = (uint32_t *)memrealloc(page_ids, sizeof(uint32_t) * max_pages_used);
	}

public:
	// Read-only indexed access; crashes (debug builds) on an out-of-bounds index.
	_FORCE_INLINE_ const T &operator[](uint64_t p_index) const {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		uint32_t page = p_index >> page_size_shift;
		uint32_t offset = p_index & page_size_mask;

		return page_data[page][offset];
	}
	// Mutable indexed access; crashes (debug builds) on an out-of-bounds index.
	_FORCE_INLINE_ T &operator[](uint64_t p_index) {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		uint32_t page = p_index >> page_size_shift;
		uint32_t offset = p_index & page_size_mask;

		return page_data[page][offset];
	}

	// Appends a copy of p_value, borrowing a new page from the pool whenever
	// the current last page is full.
	_FORCE_INLINE_ void push_back(const T &p_value) {
		uint32_t remainder = count & page_size_mask;
		if (unlikely(remainder == 0)) {
			// at 0, so time to request a new page
			uint32_t page_count = _get_pages_in_use();
			uint32_t new_page_count = page_count + 1;

			if (unlikely(new_page_count > max_pages_used)) {
				ERR_FAIL_NULL(page_pool); // Safety check.

				_grow_page_array(); //keep out of inline
			}

			uint32_t page_id = page_pool->alloc_page();
			page_data[page_count] = page_pool->get_page(page_id);
			page_ids[page_count] = page_id;
		}

		// place the new value
		uint32_t page = count >> page_size_shift;
		uint32_t offset = count & page_size_mask;

		if (!std::is_trivially_constructible<T>::value) {
			// Non-trivial type: construct in place via placement new.
			memnew_placement(&page_data[page][offset], T(p_value));
		} else {
			// Trivial type: plain assignment into the raw page storage suffices.
			page_data[page][offset] = p_value;
		}

		count++;
	}

	// Removes the last element, destroying it if needed and returning the
	// last page to the pool once it becomes empty.
	_FORCE_INLINE_ void pop_back() {
		ERR_FAIL_COND(count == 0);

		if (!std::is_trivially_destructible<T>::value) {
			uint32_t page = (count - 1) >> page_size_shift;
			uint32_t offset = (count - 1) & page_size_mask;
			page_data[page][offset].~T();
		}

		uint32_t remainder = count & page_size_mask;
		if (unlikely(remainder == 1)) {
			// one element remained, so page must be freed.
			uint32_t last_page = _get_pages_in_use() - 1;
			page_pool->free_page(page_ids[last_page]);
		}
		count--;
	}

	// Destroys all elements and returns every page to the pool. The
	// page_data / page_ids arrays keep their capacity for the next fill.
	void clear() {
		//destruct if needed
		if (!std::is_trivially_destructible<T>::value) {
			for (uint64_t i = 0; i < count; i++) {
				uint32_t page = i >> page_size_shift;
				uint32_t offset = i & page_size_mask;
				page_data[page][offset].~T();
			}
		}

		//return the pages to the pagepool, so they can be used by another array eventually
		uint32_t pages_used = _get_pages_in_use();
		for (uint32_t i = 0; i < pages_used; i++) {
			page_pool->free_page(page_ids[i]);
		}

		count = 0;

		//note we leave page_data and page_indices intact for next use. If you really want to clear them call reset()
	}

	// Like clear(), but also frees the page_data / page_ids arrays themselves.
	void reset() {
		clear();
		if (page_data) {
			memfree(page_data);
			memfree(page_ids);
			page_data = nullptr;
			page_ids = nullptr;
			max_pages_used = 0;
		}
	}

	// This takes the pages from a source array and merges them to this one
	// resulting order is undefined, but content is merged very efficiently,
	// making it ideal to fill content on several threads to later join it.
	// Both arrays must share the same PagedArrayPool. p_array is left empty.

	void merge_unordered(PagedArray<T> &p_array) {
		ERR_FAIL_COND(page_pool != p_array.page_pool);

		// Detach this array's partially-filled last page (if any); its
		// elements are re-attached after the source pages are taken over.
		uint32_t remainder = count & page_size_mask;

		T *remainder_page = nullptr;
		uint32_t remainder_page_id = 0;

		if (remainder > 0) {
			uint32_t last_page = _get_pages_in_use() - 1;
			remainder_page = page_data[last_page];
			remainder_page_id = page_ids[last_page];
		}

		count -= remainder;

		uint32_t src_page_index = 0;
		uint32_t page_size = page_size_mask + 1;

		// Steal the source array's pages wholesale — no element copies.
		while (p_array.count > 0) {
			uint32_t page_count = _get_pages_in_use();
			uint32_t new_page_count = page_count + 1;

			if (unlikely(new_page_count > max_pages_used)) {
				_grow_page_array(); //keep out of inline
			}

			page_data[page_count] = p_array.page_data[src_page_index];
			page_ids[page_count] = p_array.page_ids[src_page_index];

			uint32_t take = MIN(p_array.count, page_size); //pages to take away
			p_array.count -= take;
			count += take;
			src_page_index++;
		}

		//handle the remainder page if exists
		if (remainder_page) {
			uint32_t new_remainder = count & page_size_mask;

			if (new_remainder > 0) {
				//must merge old remainder with new remainder

				T *dst_page = page_data[_get_pages_in_use() - 1];
				uint32_t to_copy = MIN(page_size - new_remainder, remainder);

				// Move elements from the tail of the old remainder page into
				// the free space of the (partially filled) new last page.
				for (uint32_t i = 0; i < to_copy; i++) {
					if (!std::is_trivially_constructible<T>::value) {
						memnew_placement(&dst_page[i + new_remainder], T(remainder_page[i + remainder - to_copy]));
					} else {
						dst_page[i + new_remainder] = remainder_page[i + remainder - to_copy];
					}

					if (!std::is_trivially_destructible<T>::value) {
						remainder_page[i + remainder - to_copy].~T();
					}
				}

				remainder -= to_copy; //subtract what was copied from remainder
				count += to_copy; //add what was copied to the count

				if (remainder == 0) {
					//entire remainder copied, let go of remainder page
					page_pool->free_page(remainder_page_id);
					remainder_page = nullptr;
				}
			}

			if (remainder > 0) {
				// There is still remainder; count is now page-aligned (the
				// copy above either filled the last page or consumed the
				// whole remainder), so the old page — holding elements at
				// offsets 0..remainder-1 — can be re-appended as the new
				// last page.
				uint32_t page_count = _get_pages_in_use();
				uint32_t new_page_count = page_count + 1;

				if (unlikely(new_page_count > max_pages_used)) {
					_grow_page_array(); //keep out of inline
				}

				page_data[page_count] = remainder_page;
				page_ids[page_count] = remainder_page_id;

				count += remainder;
			}
		}
	}

	// Number of elements currently stored.
	_FORCE_INLINE_ uint64_t size() const {
		return count;
	}

	// Assigns the shared pool this array borrows pages from, and caches its
	// shift/mask. Must be called before any page has been used.
	void set_page_pool(PagedArrayPool<T> *p_page_pool) {
		ERR_FAIL_COND(max_pages_used > 0); // Safety check.

		page_pool = p_page_pool;
		page_size_mask = page_pool->get_page_size_mask();
		page_size_shift = page_pool->get_page_size_shift();
	}

	~PagedArray() {
		reset();
	}
};
366
367#endif // PAGED_ARRAY_H
368