/*----------------------------------------------------------------------------
Copyright (c) 2018, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset, memcpy


/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return false;

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  #endif
  size_t count = 0;
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while (page != NULL) {
      mi_page_t* next = page->next;  // save next in case the page gets removed from the queue
      mi_assert_internal(page->heap == heap);
      count++;
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next;  // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}


#if MI_DEBUG>1
static bool _mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  UNUSED(arg1);
  UNUSED(arg2);
  UNUSED(pq);
  mi_assert_internal(page->heap == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}

static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &_mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif




/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

typedef enum mi_collect_e {
  NORMAL,
  FORCE,
  ABANDON
} mi_collect_t;
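
// A NORMAL collect frees pages that happen to be completely empty; FORCE
// additionally returns cached segments (and, on the main thread, regions)
// to the OS; ABANDON is used when a thread terminates: pages that still
// contain live blocks are abandoned so other threads can reclaim them later.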


static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2) {
  UNUSED(arg2);
  UNUSED(heap);
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= ABANDON);
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page. TODO: should we retire here and be less aggressive?
    _mi_page_free(page, pq, collect != NORMAL);
  }
  else if (collect == ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true;  // don't break
}

static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  UNUSED(arg1);
  UNUSED(arg2);
  UNUSED(heap);
  UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);
  return true;  // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (!mi_heap_is_initialized(heap)) return;
  _mi_deferred_free(heap, collect > NORMAL);

  // collect (some) abandoned pages
  if (collect >= NORMAL && !heap->no_reclaim) {
    if (collect == NORMAL) {
      // this may free some segments (but also take ownership of abandoned pages)
      _mi_segment_try_reclaim_abandoned(heap, false, &heap->tld->segments);
    }
    #if MI_DEBUG
    else if (collect == ABANDON && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
      // the main thread is abandoning its heap: try to free all abandoned segments.
      // if all memory is freed by now, all segments should be freed.
      _mi_segment_try_reclaim_abandoned(heap, true, &heap->tld->segments);
    }
    #endif
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == ABANDON) {
    //for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
    //  _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
    //}
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free thread delayed blocks.
  // (if abandoning, after this there are no more local references into the pages.)
  _mi_heap_delayed_free(heap);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal(collect != ABANDON || heap->thread_delayed_free == NULL);

  // collect segment caches
  if (collect >= FORCE) {
    _mi_segment_thread_collect(&heap->tld->segments);
  }

  // collect regions
  if (collect >= FORCE && _mi_is_main_thread()) {
    _mi_mem_collect(&heap->tld->stats);
  }
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? FORCE : NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_get_default_heap(), force);
}
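
// For example (a sketch, not part of the library): a long-running service can
// return unused memory to the OS at a quiescent point with a forced collect:
//
//   mi_collect(true);    // FORCE: also release segment caches (and regions)
//
// while `mi_collect(false)` only performs the cheaper NORMAL collection.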


/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_get_default_heap();
}

mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

uintptr_t _mi_heap_random(mi_heap_t* heap) {
  uintptr_t r = heap->random;
  heap->random = _mi_random_shuffle(r);
  return r;
}

mi_heap_t* mi_heap_new(void) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);
  if (heap==NULL) return NULL;
  memcpy(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = bheap->tld;
  heap->thread_id = _mi_thread_id();
  heap->cookie = ((uintptr_t)heap ^ _mi_heap_random(bheap)) | 1;
  heap->random = _mi_heap_random(bheap);
  heap->no_reclaim = true;  // don't reclaim abandoned pages, or otherwise `mi_heap_destroy` is unsafe
  return heap;
}
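
// For example (a sketch, not part of the library): a first-class heap can hold
// the allocations of a single task and be torn down all at once:
//
//   mi_heap_t* h = mi_heap_new();
//   void* p = mi_heap_malloc(h, 64);
//   void* q = mi_heap_calloc(h, 8, 16);
//   mi_heap_destroy(h);   // frees `p`, `q`, and `h` itself in one go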

// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  #ifdef MI_MEDIUM_DIRECT
  memset(&heap->pages_free_medium, 0, sizeof(heap->pages_free_medium));
  #endif
  memcpy(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (mi_heap_is_backing(heap)) return;  // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }
  // and free the used memory
  mi_free(heap);
}


/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  UNUSED(arg1);
  UNUSED(arg2);
  UNUSED(heap);
  UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);

  // stats
  if (page->block_size > MI_LARGE_OBJ_SIZE_MAX) {
    if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
      _mi_stat_decrease(&heap->tld->stats.giant, page->block_size);
    }
    else {
      _mi_stat_decrease(&heap->tld->stats.huge, page->block_size);
    }
  }
  #if (MI_STAT>1)
  size_t inuse = page->used - page->thread_freed;
  if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal[_mi_bin(page->block_size)], inuse);
  }
  mi_heap_stat_decrease(heap, malloc, page->block_size * inuse);  // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(page->thread_freed <= 0xFFFF);
  page->used = (uint16_t)page->thread_freed;

  // and free the page
  _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);

  return true;  // keep going
}

void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (!mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}



/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // unfull all full pages in the `from` heap
  mi_page_t* page = from->pages[MI_BIN_FULL].first;
  while (page != NULL) {
    mi_page_t* next = page->next;
    _mi_page_unfull(page);
    page = next;
  }
  mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);

  // free outstanding thread delayed free blocks
  _mi_heap_delayed_free(from);

  // transfer all pages by appending the queues; this will set a new heap
  // field which is ok as all pages are unfull'd and thus other threads
  // won't access this field anymore (see `mi_free_block_mt`)
  for (size_t i = 0; i < MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->thread_delayed_free == NULL);
  mi_assert_internal(from->page_count == 0);

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safely delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (!mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}
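
// For example (a sketch, not part of the library): unlike `mi_heap_destroy`,
// deleting a heap keeps its live blocks valid by migrating them:
//
//   mi_heap_t* h = mi_heap_new();
//   char* s = (char*)mi_heap_malloc(h, 32);
//   mi_heap_delete(h);   // `s` now belongs to the backing heap...
//   mi_free(s);          // ...and can still be used and freed normally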

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(mi_heap_is_initialized(heap));
  if (!mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}
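
// For example (a sketch, not part of the library): temporarily route the
// default allocation API to a custom heap:
//
//   mi_heap_t* h = mi_heap_new();
//   mi_heap_t* old = mi_heap_set_default(h);
//   void* p = mi_malloc(128);     // allocated in `h`
//   mi_heap_set_default(old);     // restore the previous default
//   mi_heap_delete(h);            // `p` migrates to the backing heap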




/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if (mi_unlikely(!valid)) return NULL;
  return _mi_segment_page_of(segment, p)->heap;
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (!mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}


static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  UNUSED(heap);
  UNUSED(pq);
  bool* found = (bool*)vfound;
  mi_segment_t* segment = _mi_page_segment(page);
  void* start = _mi_page_start(segment, page, NULL);
  void* end = (uint8_t*)start + (page->capacity * page->block_size);
  *found = (p >= start && p < end);
  return (!*found);  // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (!mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_get_default_heap(), p);
}
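
// For example (a sketch, not part of the library):
//
//   void* p = mi_malloc(100);
//   bool owned = mi_check_owned(p);   // true: `p` lies in a page of this
//                                     // thread's default heap
//
// Note that this only checks page ownership; it does not verify that `p`
// points to the start of a block.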

/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t*     page;
} mi_heap_area_ex_t;

static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(xarea != NULL);
  if (xarea==NULL) return true;
  const mi_heap_area_t* area = &xarea->area;
  mi_page_t* page = xarea->page;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  _mi_page_free_collect(page, true);
  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(page->heap, area, pstart, page->block_size, arg);
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS   (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
  memset(free_map, 0, sizeof(free_map));

  size_t free_count = 0;
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
    free_count++;
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % page->block_size == 0);
    size_t blockidx = offset / page->block_size;  // Todo: avoid division?
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / (8*MI_INTPTR_SIZE));  // bits, not bytes, per map word
    size_t bit = blockidx - (bitidx * (8*MI_INTPTR_SIZE));
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  size_t used_count = 0;
  for (size_t i = 0; i < page->capacity; i++) {
    size_t bitidx = (i / (8*MI_INTPTR_SIZE));
    size_t bit = i - (bitidx * (8*MI_INTPTR_SIZE));
    uintptr_t m = free_map[bitidx];
    if (bit == 0 && m == UINTPTR_MAX) {
      i += ((8*MI_INTPTR_SIZE) - 1);  // skip a whole word of free blocks
    }
    else if ((m & ((uintptr_t)1 << bit)) == 0) {
      used_count++;
      uint8_t* block = pstart + (i * page->block_size);
      if (!visitor(page->heap, area, block, page->block_size, arg)) return false;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);


static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  UNUSED(heap);
  UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  xarea.page = page;
  xarea.area.reserved = page->reserved * page->block_size;
  xarea.area.committed = page->capacity * page->block_size;
  xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
  xarea.area.used = page->used - page->thread_freed;  // race is ok
  xarea.area.block_size = page->block_size;
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg);  // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool  visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
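
// For example (a sketch, not part of the library): count the live blocks in
// the default heap. The visitor is invoked once per area with `block == NULL`,
// and (when `visit_blocks` is true) once per live block:
//
//   static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
//                            void* block, size_t block_size, void* arg) {
//     if (block != NULL) (*(size_t*)arg)++;
//     return true;   // keep visiting
//   }
//
//   size_t live = 0;
//   mi_heap_visit_blocks(mi_heap_get_default(), true, &count_blocks, &live);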