/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbb/scalable_allocator.h"
#include "tbb/atomic.h"
#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1
#include "harness.h"
#include "harness_barrier.h"
#include "harness_tls.h"
#if !__TBB_SOURCE_DIRECTLY_INCLUDED
#include "harness_tbb_independence.h"
#endif

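// This file exercises the memory pool interface of the TBB scalable allocator:
// rml::MemPoolPolicy, pool_create_v1, pool_malloc/pool_free/pool_realloc,
// pool_reset, pool_destroy, pool_identify and pool_msize. A pool is driven by
// two user callbacks: one that provides raw memory regions to the pool and one
// that takes them back.

// Round arg up to the nearest multiple of alignment (a power of two).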
template<typename T>
static inline T alignUp (T arg, uintptr_t alignment) {
    return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1));
}

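// Backing storage for the CrossThread* callbacks below: a preallocated buffer
// from which regions are carved by a simple bump of `pos`.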
struct PoolSpace: NoCopy {
    size_t pos;
    int    regions;
    size_t bufSize;
    char  *space;

    static const size_t BUF_SIZE = 8*1024*1024;

    PoolSpace(size_t bufSz = BUF_SIZE) :
        pos(0), regions(0),
        bufSize(bufSz), space(new char[bufSize]) {
        memset(space, 0, bufSize);
    }
    ~PoolSpace() {
        delete []space;
    }
};

static PoolSpace *poolSpace;

struct MallocPoolHeader {
    void  *rawPtr;
    size_t userSize;
};

static tbb::atomic<int> liveRegions;

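// malloc-backed region provider: stashes a MallocPoolHeader just before the
// pointer handed to the pool, so putMallocMem can recover the raw allocation,
// and shifts the region by one extra byte to exercise unaligned regions.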
static void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes)
{
    void *rawPtr = malloc(bytes+sizeof(MallocPoolHeader)+1);
    if (!rawPtr)
        return NULL;
    // +1 to test that the pool copes with regions that are not suitably aligned
    void *ret = (void *)((uintptr_t)rawPtr+sizeof(MallocPoolHeader)+1);

    MallocPoolHeader *hdr = (MallocPoolHeader*)ret-1;
    hdr->rawPtr = rawPtr;
    hdr->userSize = bytes;

    liveRegions++;

    return ret;
}

static int putMallocMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
{
    MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
    ASSERT(bytes == hdr->userSize, "Invalid size in pool callback.");
    free(hdr->rawPtr);

    liveRegions--;

    return 0;
}

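// After pool_reset, repeating the same allocation pattern must not change the
// number of live regions, i.e. the regions are expected to be reused rather
// than re-requested from the callback.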
void TestPoolReset()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<100; i++) {
        ASSERT(pool_malloc(pool, 8), NULL);
        ASSERT(pool_malloc(pool, 50*1024), NULL);
    }
    int regionsBeforeReset = liveRegions;
    bool ok = pool_reset(pool);
    ASSERT(ok, NULL);
    for (int i=0; i<100; i++) {
        ASSERT(pool_malloc(pool, 8), NULL);
        ASSERT(pool_malloc(pool, 50*1024), NULL);
    }
    ASSERT(regionsBeforeReset == liveRegions,
           "Expected no new region allocations.");
    ok = pool_destroy(pool);
    ASSERT(ok, NULL);
    ASSERT(!liveRegions, "Expected all regions to be released.");
}

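// One pool shared by several threads. Each thread allocates two kinds of
// objects: crossThread objects are freed by a peer thread inside the parallel
// region, afterTerm objects are freed by the main thread after the region ends.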
class SharedPoolRun: NoAssign {
    static long threadNum;
    static Harness::SpinBarrier startB,
                                mallocDone;
    static rml::MemoryPool *pool;
    static void **crossThread,
                **afterTerm;
public:
    static const int OBJ_CNT = 100;

    static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) {
        threadNum = num;
        pool = pl;
        crossThread = crThread;
        afterTerm = aTerm;
        startB.initialize(threadNum);
        mallocDone.initialize(threadNum);
    }

    void operator()( int id ) const {
        const int ITERS = 1000;
        void *local[ITERS];

        startB.wait();
        for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) {
            afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024);
            memset(afterTerm[i], i, i%2? 8*1024 : 9*1024);
            crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024);
            memset(crossThread[i], i, i%2? 9*1024 : 8*1024);
        }

        for (int i=1; i<ITERS; i+=2) {
            local[i-1] = pool_malloc(pool, 6*1024);
            memset(local[i-1], i, 6*1024);
            local[i] = pool_malloc(pool, 16*1024);
            memset(local[i], i, 16*1024);
        }
        mallocDone.wait();
        int myVictim = threadNum-id-1;
        for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++)
            pool_free(pool, crossThread[i]);
        for (int i=0; i<ITERS; i++)
            pool_free(pool, local[i]);
    }
};

long SharedPoolRun::threadNum;
Harness::SpinBarrier SharedPoolRun::startB,
                     SharedPoolRun::mallocDone;
rml::MemoryPool *SharedPoolRun::pool;
void **SharedPoolRun::crossThread,
     **SharedPoolRun::afterTerm;

// single pool shared by different threads
void TestSharedPool()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    void **crossThread = new void*[MaxThread * SharedPoolRun::OBJ_CNT];
    void **afterTerm = new void*[MaxThread * SharedPoolRun::OBJ_CNT];

    for (int p=MinThread; p<=MaxThread; p++) {
        SharedPoolRun::init(p, pool, crossThread, afterTerm);
        SharedPoolRun thr;

        void *hugeObj = pool_malloc(pool, 10*1024*1024);
        ASSERT(hugeObj, NULL);

        NativeParallelFor( p, thr );

        pool_free(pool, hugeObj);
        for (int i=0; i<p*SharedPoolRun::OBJ_CNT; i++)
            pool_free(pool, afterTerm[i]);
    }
    delete []afterTerm;
    delete []crossThread;

    bool ok = pool_destroy(pool);
    ASSERT(ok, NULL);
    ASSERT(!liveRegions, "Expected all regions to be released.");
}

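// Bump-pointer region provider over the per-pool PoolSpace buffer; memory is
// never returned to the system, only the region count is tracked.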
void *CrossThreadGetMem(intptr_t pool_id, size_t &bytes)
{
    if (poolSpace[pool_id].pos + bytes > poolSpace[pool_id].bufSize)
        return NULL;

    void *ret = poolSpace[pool_id].space + poolSpace[pool_id].pos;
    poolSpace[pool_id].pos += bytes;
    poolSpace[pool_id].regions++;

    return ret;
}

int CrossThreadPutMem(intptr_t pool_id, void* /*raw_ptr*/, size_t /*raw_bytes*/)
{
    poolSpace[pool_id].regions--;
    return 0;
}

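// Each thread creates and fills its own pool; after a barrier, the object and
// the pool created by a peer thread are freed and destroyed by another thread.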
class CrossThreadRun: NoAssign {
    static long number_of_threads;
    static Harness::SpinBarrier barrier;
    static rml::MemoryPool **pool;
    static char **obj;
public:
    static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
    static void init(long num) {
        number_of_threads = num;
        pool = new rml::MemoryPool*[number_of_threads];
        poolSpace = new PoolSpace[number_of_threads];
        obj = new char*[number_of_threads];
    }
    static void destroy() {
        for (long i=0; i<number_of_threads; i++)
            ASSERT(!poolSpace[i].regions, "Memory leak detected");
        delete []pool;
        delete []poolSpace;
        delete []obj;
    }
    CrossThreadRun() {}
    void operator()( int id ) const {
        rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
        const int objLen = 10*id;

        pool_create_v1(id, &pol, &pool[id]);
        obj[id] = (char*)pool_malloc(pool[id], objLen);
        ASSERT(obj[id], NULL);
        memset(obj[id], id, objLen);

        {
            const size_t lrgSz = 2*16*1024;
            void *ptrLarge = pool_malloc(pool[id], lrgSz);
            ASSERT(ptrLarge, NULL);
            memset(ptrLarge, 1, lrgSz);
            // consume all small objects
            while (pool_malloc(pool[id], 5 * 1024));
            // releasing the large object will not make further allocation possible,
            // since only a fixed pool can look at other bins (aligned/notAligned)
            pool_free(pool[id], ptrLarge);
            ASSERT(!pool_malloc(pool[id], 5*1024), NULL);
        }

        barrier.wait();
        int myPool = number_of_threads-id-1;
        for (int i=0; i<10*myPool; i++)
            ASSERT(myPool==obj[myPool][i], NULL);
        pool_free(pool[myPool], obj[myPool]);
        bool ok = pool_destroy(pool[myPool]);
        ASSERT(ok, NULL);
    }
};

long CrossThreadRun::number_of_threads;
Harness::SpinBarrier CrossThreadRun::barrier;
rml::MemoryPool **CrossThreadRun::pool;
char **CrossThreadRun::obj;

// pools created, used and destroyed by different threads
void TestCrossThreadPools()
{
    for (int p=MinThread; p<=MaxThread; p++) {
        CrossThreadRun::initBarrier(p);
        CrossThreadRun::init(p);
        NativeParallelFor( p, CrossThreadRun() );
        for (int i=0; i<p; i++)
            ASSERT(!poolSpace[i].regions, "Region leak detected");
        CrossThreadRun::destroy();
    }
}

// the buffer is too small for a pool to be created, but resources must not leak
void TestTooSmallBuffer()
{
    poolSpace = new PoolSpace(8*1024);

    rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
    rml::MemoryPool *pool;
    pool_create_v1(0, &pol, &pool);
    bool ok = pool_destroy(pool);
    ASSERT(ok, NULL);
    ASSERT(!poolSpace[0].regions, "Expected no leaked regions.");

    delete poolSpace;
}

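// Single-shot region provider for fixed-size pools: hands its whole buffer to
// the pool exactly once and asserts that it is never asked again.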
class FixedPoolHeadBase : NoAssign {
    size_t size;
    intptr_t used;
    char *data;
public:
    FixedPoolHeadBase(size_t s) : size(s), used(false) {
        data = new char[size];
    }
    void *useData(size_t &bytes) {
        intptr_t wasUsed = __TBB_FetchAndStoreW(&used, true);
        ASSERT(!wasUsed, "The buffer must not be used twice.");
        bytes = size;
        return data;
    }
    ~FixedPoolHeadBase() {
        delete []data;
    }
};

template<size_t SIZE>
class FixedPoolHead : FixedPoolHeadBase {
public:
    FixedPoolHead() : FixedPoolHeadBase(SIZE) { }
};

static void *fixedBufGetMem(intptr_t pool_id, size_t &bytes)
{
    return ((FixedPoolHeadBase*)pool_id)->useData(bytes);
}

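// Repeatedly allocates and frees reqSize-byte objects from a shared fixed pool.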
class FixedPoolUse: NoAssign {
    static Harness::SpinBarrier startB;
    rml::MemoryPool *pool;
    size_t reqSize;
    int iters;
public:
    FixedPoolUse(unsigned threads, rml::MemoryPool *p, size_t sz, int it) :
        pool(p), reqSize(sz), iters(it) {
        startB.initialize(threads);
    }
    void operator()( int /*id*/ ) const {
        startB.wait();
        for (int i=0; i<iters; i++) {
            void *o = pool_malloc(pool, reqSize);
            ASSERT(o, NULL);
            pool_free(pool, o);
        }
    }
};

Harness::SpinBarrier FixedPoolUse::startB;

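// Once the fixed pool has been filled completely, any further request must fail.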
class FixedPoolNomem: NoAssign {
    Harness::SpinBarrier *startB;
    rml::MemoryPool *pool;
public:
    FixedPoolNomem(Harness::SpinBarrier *b, rml::MemoryPool *p) :
        startB(b), pool(p) {}
    void operator()(int id) const {
        startB->wait();
        void *o = pool_malloc(pool, id%2? 64 : 128*1024);
        ASSERT(!o, "All memory must be consumed.");
    }
};

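// With some memory still available, concurrent requests may or may not succeed;
// whatever was obtained is freed after a barrier.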
class FixedPoolSomeMem: NoAssign {
    Harness::SpinBarrier *barrier;
    rml::MemoryPool *pool;
public:
    FixedPoolSomeMem(Harness::SpinBarrier *b, rml::MemoryPool *p) :
        barrier(b), pool(p) {}
    void operator()(int id) const {
        barrier->wait();
        Harness::Sleep(2*id);
        void *o = pool_malloc(pool, id%2? 64 : 128*1024);
        barrier->wait();
        pool_free(pool, o);
    }
};

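// Probe allocation: true iff the pool can currently satisfy a request of sz bytes.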
bool haveEnoughSpace(rml::MemoryPool *pool, size_t sz)
{
    if (void *p = pool_malloc(pool, sz)) {
        pool_free(pool, p);
        return true;
    }
    return false;
}

void TestFixedBufferPool()
{
    const int ITERS = 7;
    const size_t MAX_OBJECT = 7*1024*1024;
    void *ptrs[ITERS];
    rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pool;
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;

        pool_create_v1((intptr_t)&head, &pol, &pool);
        {
            NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 2) );

            for (int i=0; i<ITERS; i++) {
                ptrs[i] = pool_malloc(pool, MAX_OBJECT/ITERS);
                ASSERT(ptrs[i], NULL);
            }
            for (int i=0; i<ITERS; i++)
                pool_free(pool, ptrs[i]);

            NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 1) );
        }
        // each thread asks for a MAX_OBJECT/p/2 object;
        // the /2 leaves headroom for fragmentation
        for (int p=MinThread; p<=MaxThread; p++)
            NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 10000) );
        {
            const int p=128;
            NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        }
        {
            size_t maxSz;
            const int p = 512;
            Harness::SpinBarrier barrier(p);

            // Find the maximal useful object size via binary search. Start with
            // MAX_OBJECT/2, as the pool might be fragmented by BootStrapBlocks
            // consumed during the FixedPoolUse runs above.
            size_t l, r;
            ASSERT(haveEnoughSpace(pool, MAX_OBJECT/2), NULL);
            for (l = MAX_OBJECT/2, r = MAX_OBJECT + 1024*1024; l < r-1; ) {
                size_t mid = (l+r)/2;
                if (haveEnoughSpace(pool, mid))
                    l = mid;
                else
                    r = mid;
            }
            maxSz = l;
            ASSERT(!haveEnoughSpace(pool, maxSz+1), "Expected to find the boundary value.");
            // consume all available memory
            void *largeObj = pool_malloc(pool, maxSz);
            ASSERT(largeObj, NULL);
            void *o = pool_malloc(pool, 64);
            if (o) // the pool is fragmented, skip FixedPoolNomem
                pool_free(pool, o);
            else
                NativeParallelFor( p, FixedPoolNomem(&barrier, pool) );
            pool_free(pool, largeObj);
            // keep some space unoccupied
            largeObj = pool_malloc(pool, maxSz-512*1024);
            ASSERT(largeObj, NULL);
            NativeParallelFor( p, FixedPoolSomeMem(&barrier, pool) );
            pool_free(pool, largeObj);
        }
        bool ok = pool_destroy(pool);
        ASSERT(ok, NULL);
    }
    // check that a fresh, untouched pool can successfully fulfill requests from 128 threads
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;
        pool_create_v1((intptr_t)&head, &pol, &pool);
        int p=128;
        NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        bool ok = pool_destroy(pool);
        ASSERT(ok, NULL);
    }
}

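// Every region requested from or returned to the callbacks must be a multiple
// of the granularity configured in MemPoolPolicy.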
static size_t currGranularity;

static void *getGranMem(intptr_t /*pool_id*/, size_t &bytes)
{
    ASSERT(!(bytes%currGranularity), "Region size must be a multiple of the granularity.");
    return malloc(bytes);
}

static int putGranMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
{
    ASSERT(!(bytes%currGranularity), "Region size must be a multiple of the granularity.");
    free(ptr);
    return 0;
}

void TestPoolGranularity()
{
    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    const size_t grans[] = {4*1024, 2*1024*1024, 6*1024*1024, 10*1024*1024};

    for (unsigned i=0; i<sizeof(grans)/sizeof(grans[0]); i++) {
        pol.granularity = currGranularity = grans[i];
        rml::MemoryPool *pool;

        pool_create_v1(0, &pol, &pool);
        for (int sz=500*1024; sz<16*1024*1024; sz+=101*1024) {
            void *p = pool_malloc(pool, sz);
            ASSERT(p, "Can't allocate memory in pool.");
            pool_free(pool, p);
        }
        bool ok = pool_destroy(pool);
        ASSERT(ok, NULL);
    }
}

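// Callback-call counters, used below to verify region caching and leak-freedom.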
static size_t putMemAll, getMemAll, getMemSuccessful;

static void *getMemMalloc(intptr_t /*pool_id*/, size_t &bytes)
{
    getMemAll++;
    void *p = malloc(bytes);
    if (p)
        getMemSuccessful++;
    return p;
}

static int putMemFree(intptr_t /*pool_id*/, void *ptr, size_t /*bytes*/)
{
    putMemAll++;
    free(ptr);
    return 0;
}

void TestPoolKeepTillDestroy()
{
    const int ITERS = 50*1024;
    void *ptrs[2*ITERS+1];
    rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
    rml::MemoryPool *pool;

    // First create a default pool that returns memory to the callback,
    // then repeat with the keepMemTillDestroy policy.
    for (int keep=0; keep<2; keep++) {
        getMemAll = putMemAll = 0;
        if (keep)
            pol.keepAllMemory = 1;
        pool_create_v1(0, &pol, &pool);
        for (int i=0; i<2*ITERS; i+=2) {
            ptrs[i] = pool_malloc(pool, 7*1024);
            ptrs[i+1] = pool_malloc(pool, 10*1024);
        }
        ptrs[2*ITERS] = pool_malloc(pool, 8*1024*1024);
        ASSERT(!putMemAll, NULL);
        for (int i=0; i<2*ITERS; i++)
            pool_free(pool, ptrs[i]);
        pool_free(pool, ptrs[2*ITERS]);
        size_t totalPutMemCalls = putMemAll;
        if (keep)
            ASSERT(!putMemAll, NULL);
        else {
            ASSERT(putMemAll, NULL);
            putMemAll = 0;
        }
        size_t getCallsBefore = getMemAll;
        void *p = pool_malloc(pool, 8*1024*1024);
        ASSERT(p, NULL);
        if (keep)
            ASSERT(getCallsBefore == getMemAll, "Must not lead to a new getMem call");
        size_t putCallsBefore = putMemAll;
        bool ok = pool_reset(pool);
        ASSERT(ok, NULL);
        ASSERT(putCallsBefore == putMemAll, "The pool must not release memory during reset.");
        ok = pool_destroy(pool);
        ASSERT(ok, NULL);
        ASSERT(putMemAll, NULL);
        totalPutMemCalls += putMemAll;
        ASSERT(getMemAll == totalPutMemCalls, "Memory leak detected.");
    }
}

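// true iff every byte of buf[0..size) equals val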
static bool memEqual(char *buf, size_t size, int val)
{
    bool memEq = true;
    for (size_t k=0; k<size; k++)
        if (buf[k] != val)
            memEq = false;
    return memEq;
}

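// Exercise the remaining entry points: pool_aligned_malloc, pool_aligned_realloc,
// pool_realloc, and the NULL-pool variants of pool_destroy/pool_reset.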
void TestEntries()
{
    const int SZ = 4;
    const int ALGN = 4;
    size_t size[SZ] = {8, 8000, 9000, 100*1024};
    size_t algn[ALGN] = {8, 64, 4*1024, 8*1024*1024};

    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    currGranularity = 1; // do not check granularity in this test
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<SZ; i++)
        for (int j=0; j<ALGN; j++) {
            char *p = (char*)pool_aligned_malloc(pool, size[i], algn[j]);
            ASSERT(p && 0==((uintptr_t)p & (algn[j]-1)), NULL);
            memset(p, j, size[i]);

            size_t curr_algn = algn[rand() % ALGN];
            size_t curr_sz = size[rand() % SZ];
            char *p1 = (char*)pool_aligned_realloc(pool, p, curr_sz, curr_algn);
            ASSERT(p1 && 0==((uintptr_t)p1 & (curr_algn-1)), NULL);
            ASSERT(memEqual(p1, min(size[i], curr_sz), j), NULL);

            memset(p1, j+1, curr_sz);
            size_t curr_sz1 = size[rand() % SZ];
            char *p2 = (char*)pool_realloc(pool, p1, curr_sz1);
            ASSERT(p2, NULL);
            ASSERT(memEqual(p2, min(curr_sz1, curr_sz), j+1), NULL);

            pool_free(pool, p2);
        }

    bool ok = pool_destroy(pool);
    ASSERT(ok, NULL);

    bool fail = rml::pool_destroy(NULL);
    ASSERT(!fail, NULL);
    fail = rml::pool_reset(NULL);
    ASSERT(!fail, NULL);
}

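// Create a pool backed by getMemMalloc/putMemFree and verify that it can serve
// an allocation of `size`. Returns NULL if creation fails or if the provider
// supplies no memory.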
rml::MemoryPool *CreateUsablePool(size_t size)
{
    rml::MemoryPool *pool;
    rml::MemPoolPolicy okPolicy(getMemMalloc, putMemFree);

    putMemAll = getMemAll = getMemSuccessful = 0;
    rml::MemPoolError res = pool_create_v1(0, &okPolicy, &pool);
    if (res != rml::POOL_OK) {
        ASSERT(!getMemAll && !putMemAll, "No callbacks after fail.");
        return NULL;
    }
    void *o = pool_malloc(pool, size);
    if (!getMemSuccessful) {
        // no memory from the callback is a valid reason to give up
        ASSERT(!o, "The pool must be unusable.");
        return NULL;
    }
    ASSERT(o, "Created pool must be useful.");
    ASSERT(getMemSuccessful == 1 || getMemSuccessful == 5 || getMemAll > getMemSuccessful,
           "Multiple requests are allowed when an unsuccessful request occurred "
           "or when bootstrap memory cannot be searched.");
    ASSERT(!putMemAll, NULL);
    pool_free(pool, o);

    return pool;
}

void CheckPoolLeaks(size_t poolsAlwaysAvailable)
{
    const size_t MAX_POOLS = 16*1000;
    const int ITERS = 20, CREATED_STABLE = 3;
    rml::MemoryPool *pools[MAX_POOLS];
    size_t created, maxCreated = MAX_POOLS;
    int maxNotChangedCnt = 0;

    // Expect that within ITERS runs the maximal number of pools that can be
    // created stabilizes, and then stays stable for CREATED_STABLE more runs.
    for (int j=0; j<ITERS && maxNotChangedCnt<CREATED_STABLE; j++) {
        for (created=0; created<maxCreated; created++) {
            rml::MemoryPool *p = CreateUsablePool(1024);
            if (!p)
                break;
            pools[created] = p;
        }
        ASSERT(created>=poolsAlwaysAvailable,
               "Expected that a reasonable number of pools can always be created.");
        for (size_t i=0; i<created; i++) {
            bool ok = pool_destroy(pools[i]);
            ASSERT(ok, NULL);
        }
        if (created < maxCreated) {
            maxCreated = created;
            maxNotChangedCnt = 0;
        } else
            maxNotChangedCnt++;
    }
    ASSERT(maxNotChangedCnt == CREATED_STABLE, "The number of created pools must stabilize.");
}

void TestPoolCreation()
{
    putMemAll = getMemAll = getMemSuccessful = 0;

    rml::MemPoolPolicy nullPolicy(NULL, putMemFree),
                       emptyFreePolicy(getMemMalloc, NULL),
                       okPolicy(getMemMalloc, putMemFree);
    rml::MemoryPool *pool;

    rml::MemPoolError res = pool_create_v1(0, &nullPolicy, &pool);
    ASSERT(res==rml::INVALID_POLICY, "A pool with an empty pAlloc can't be created");
    res = pool_create_v1(0, &emptyFreePolicy, &pool);
    ASSERT(res==rml::INVALID_POLICY, "A pool with an empty pFree can't be created");
    ASSERT(!putMemAll && !getMemAll, "No callback calls are expected");
    res = pool_create_v1(0, &okPolicy, &pool);
    ASSERT(res==rml::POOL_OK, NULL);
    bool ok = pool_destroy(pool);
    ASSERT(ok, NULL);
    ASSERT(putMemAll == getMemSuccessful, "No leaks after pool_destroy");

    // 32 is a guess at a number of pools that is acceptable everywhere
    CheckPoolLeaks(32);
    // try to consume all but 16 TLS keys
    LimitTLSKeysTo limitTLSTo(16);
    // ...and check that we can create at least 16 pools
    CheckPoolLeaks(16);
}

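// An object that remembers the pool it was allocated from, so the result of
// rml::pool_identify can be checked later.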
struct AllocatedObject {
    rml::MemoryPool *pool;
};

const size_t BUF_SIZE = 1024*1024;

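// Verifies that rml::pool_identify maps both large and small objects back to
// the pool they were allocated from.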
class PoolIdentityCheck : NoAssign {
    rml::MemoryPool** const pools;
    AllocatedObject** const objs;
public:
    PoolIdentityCheck(rml::MemoryPool** p, AllocatedObject** o) : pools(p), objs(o) {}
    void operator()(int id) const {
        objs[id] = (AllocatedObject*)pool_malloc(pools[id], BUF_SIZE/2);
        ASSERT(objs[id], NULL);
        rml::MemoryPool *act_pool = rml::pool_identify(objs[id]);
        ASSERT(act_pool == pools[id], NULL);

        for (size_t total=0; total<2*BUF_SIZE; total+=256) {
            AllocatedObject *o = (AllocatedObject*)pool_malloc(pools[id], 256);
            ASSERT(o, NULL);
            act_pool = rml::pool_identify(o);
            ASSERT(act_pool == pools[id], NULL);
            pool_free(act_pool, o);
        }
        if( id&1 ) { // make every second returned object "small"
            pool_free(act_pool, objs[id]);
            objs[id] = (AllocatedObject*)pool_malloc(pools[id], 16);
            ASSERT(objs[id], NULL);
        }
        objs[id]->pool = act_pool;
    }
};

void TestPoolDetection()
{
    const int POOLS = 4;
    rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pools[POOLS];
    FixedPoolHead<BUF_SIZE*POOLS> head[POOLS];
    AllocatedObject *objs[POOLS];

    for (int i=0; i<POOLS; i++)
        pool_create_v1((intptr_t)(head+i), &pol, &pools[i]);
    // if an object is somehow released to the wrong pool, subsequent allocation
    // from the affected pools becomes impossible
    for (int k=0; k<10; k++) {
        PoolIdentityCheck check(pools, objs);
        if( k&1 )
            NativeParallelFor( POOLS, check);
        else
            for (int i=0; i<POOLS; i++) check(i);

        for (int i=0; i<POOLS; i++) {
            rml::MemoryPool *p = rml::pool_identify(objs[i]);
            ASSERT(p == objs[i]->pool, NULL);
            pool_free(p, objs[i]);
        }
    }
    for (int i=0; i<POOLS; i++) {
        bool ok = pool_destroy(pools[i]);
        ASSERT(ok, NULL);
    }
}

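// Pool bootstrap is lazy: the provider callbacks are first hit from pool_malloc
// (checked inside CreateUsablePool), and every successful getMem must be
// matched by a putMem on destroy.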
void TestLazyBootstrap()
{
    rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
    const size_t sizes[] = {8, 9*1024, 0};

    for (int i=0; sizes[i]; i++) {
        rml::MemoryPool *pool = CreateUsablePool(sizes[i]);
        bool ok = pool_destroy(pool);
        ASSERT(ok, NULL);
        ASSERT(getMemSuccessful == putMemAll, "No leak.");
    }
}

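// pool_destroy is called by thread 0 while the other threads that used the pool
// still exist; all regions must nevertheless be returned.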
class NoLeakOnDestroyRun: NoAssign {
    rml::MemoryPool *pool;
    Harness::SpinBarrier *barrier;
public:
    NoLeakOnDestroyRun(rml::MemoryPool *p, Harness::SpinBarrier *b) : pool(p), barrier(b) {}
    void operator()(int id) const {
        void *p = pool_malloc(pool, id%2? 8 : 9000);
        ASSERT(p && liveRegions, NULL);
        barrier->wait();
        if (!id) {
            bool ok = pool_destroy(pool);
            ASSERT(ok, NULL);
            ASSERT(!liveRegions, "Expected all regions to be released.");
        }
        // the other threads must wait for pool destruction, so that their
        // thread-shutdown cleanup does not run before it
        barrier->wait();
    }
};

void TestNoLeakOnDestroy()
{
    liveRegions = 0;
    for (int p=MinThread; p<=MaxThread; p++) {
        rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
        Harness::SpinBarrier barrier(p);
        rml::MemoryPool *pool;

        pool_create_v1(0, &pol, &pool);
        NativeParallelFor(p, NoLeakOnDestroyRun(pool, &barrier));
    }
}

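// Same as putMallocMem, but reports failure to the pool, so that error
// propagation through pool_destroy can be tested.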
static int putMallocMemError(intptr_t /*pool_id*/, void *ptr, size_t bytes)
{
    MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
    ASSERT(bytes == hdr->userSize, "Invalid size in pool callback.");
    free(hdr->rawPtr);

    liveRegions--;

    return -1;
}

void TestDestroyFailed()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMemError);
    rml::MemoryPool *pool;
    pool_create_v1(0, &pol, &pool);
    void *ptr = pool_malloc(pool, 16);
    ASSERT(ptr, NULL);
    bool fail = pool_destroy(pool);
    ASSERT(fail==false, "The putMallocMemError callback returns an error, "
           "so pool_destroy() is expected to fail");
}

void TestPoolMSize() {
    rml::MemoryPool *pool = CreateUsablePool(1024);

    const int SZ = 10;
    // Original allocation requests: random sizes from small to large
    size_t requestedSz[SZ] = {8, 16, 500, 1000, 2000, 4000, 8000, 1024*1024, 4242+4242, 8484+8484};

    // Unlike large objects, small objects do not store their original size
    // along with the object itself.
    // On the Power architecture, TLS bins are divided differently.
    size_t allocatedSz[SZ] =
#if __powerpc64__ || __ppc64__ || __bgp__
        {8, 16, 512, 1024, 2688, 5376, 8064, 1024*1024, 4242+4242, 8484+8484};
#else
        {8, 16, 512, 1024, 2688, 4032, 8128, 1024*1024, 4242+4242, 8484+8484};
#endif
    for (int i = 0; i < SZ; i++) {
        void* obj = pool_malloc(pool, requestedSz[i]);
        size_t objSize = pool_msize(pool, obj);
        ASSERT(objSize == allocatedSz[i], "pool_msize returned the wrong value");
        pool_free(pool, obj);
    }
    bool destroyed = pool_destroy(pool);
    ASSERT(destroyed, NULL);
}

int TestMain () {
    TestTooSmallBuffer();
    TestPoolReset();
    TestSharedPool();
    TestCrossThreadPools();
    TestFixedBufferPool();
    TestPoolGranularity();
    TestPoolKeepTillDestroy();
    TestEntries();
    TestPoolCreation();
    TestPoolDetection();
    TestLazyBootstrap();
    TestNoLeakOnDestroy();
    TestDestroyFailed();
    TestPoolMSize();

    return Harness::Done;
}