/*-------------------------------------------------------------------------
 *
 * aset.c
 *    Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *    This is a new (Feb. 05, 1999) implementation of the allocation set
 *    routines. AllocSet...() does not use OrderedSet...() any more.
 *    Instead it manages allocations in a block pool by itself, combining
 *    many small allocations in a few bigger blocks. AllocSetFree() normally
 *    doesn't really free() memory. It just adds the freed area to a
 *    list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *    at once on AllocSetReset(), which happens when the memory context gets
 *    destroyed.
 *              Jan Wieck
 *
 * Performance improvement from Tom Lane, 8/99: for extremely large request
 * sizes, we do want to be able to give the memory back to free() as soon
 * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
 * freelist entries that might never be usable. This is especially needed
 * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 * the previous instances of the block were guaranteed to be wasted until
 * AllocSetReset() under the old way.
 *
 * Further improvement 12/00: as the code stood, request sizes in the
 * midrange between "small" and "large" were handled very inefficiently,
 * because any sufficiently large free chunk would be used to satisfy a
 * request, even if it was much larger than necessary. This led to more
 * and more wasted space in allocated chunks over time. To fix, get rid
 * of the midrange behavior: we now handle only "small" power-of-2-size
 * chunks as chunks. Anything "large" is passed off to malloc(). Change
 * the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "utils/memdebug.h"
#include "utils/memutils.h"

/* Define this to log detailed debug information about allocations */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */
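
/*
 * For example: with ALLOC_MINBITS = 3 and ALLOCSET_NUM_FREELISTS = 11,
 * the freelists hold chunks of 8, 16, 32, ... 8192 bytes.  A 100-byte
 * request is rounded up to the next power of 2, 128 bytes, and is served
 * from (and later returned to) freelist index 4, since 1 << (4 + 3) = 128.
 * A 10000-byte request exceeds the 8K limit, so it gets a dedicated
 * malloc'd block instead.
 */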

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
/* Size of largest chunk that we use a fixed size for: */
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead): */
#define ALLOC_CHUNK_FRACTION	4

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *
 * Also, if a minContextSize is specified, the first block has that size,
 * and then initBlockSize is used for the next one.
 *--------------------
 */
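
/*
 * For example: with ALLOCSET_DEFAULT_SIZES (initBlockSize 8K, maxBlockSize
 * 8MB, no minContextSize), successive block requests are 8K, 16K, 32K, ...
 * doubling until they are clamped at 8MB.  A context that only ever holds
 * a few small allocations never grows past its single initial 8K block.
 */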

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we will still have a keeper block.  It's also different from the
 * set being logically empty, because we don't attempt to detect pfree'ing
 * the last active chunk.
 */
typedef struct AllocSetContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	AllocChunk	freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
	/* Allocation parameters for this context: */
	Size		initBlockSize;	/* initial block size */
	Size		maxBlockSize;	/* maximum block size */
	Size		nextBlockSize;	/* next block size to allocate */
	Size		allocChunkLimit;	/* effective chunk size limit */
	AllocBlock	keeper;			/* keep this block over resets */
	/* freelist this context could be put in, or -1 if not a candidate: */
	int			freeListIndex;	/* index in context_freelists[], or -1 */
} AllocSetContext;

typedef AllocSetContext *AllocSet;

/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more AllocChunks, which are
 *		the units requested by palloc() and freed by pfree().  AllocChunks
 *		cannot be returned to malloc() individually; instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 * AllocBlockData is the header data for a block --- the usable space
 * within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	prev;			/* prev block in aset's blocks list, if any */
	AllocBlock	next;			/* next block in aset's blocks list, if any */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
} AllocBlockData;

/*
 * AllocChunk
 *		The prefix of each piece of memory in an AllocBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "aset" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext).  We simplify matters for this
 * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
 * we can ensure things work by adding any required alignment padding before
 * the "aset" field.  There is a static assertion below that the alignment
 * is done correctly.
 */
typedef struct AllocChunkData
{
	/* size is always the size of the usable space in the chunk */
	Size		size;
#ifdef MEMORY_CONTEXT_CHECKING
	/* when debugging memory usage, also store actual requested size */
	/* this is zero in a free chunk */
	Size		requested_size;

#define ALLOCCHUNK_RAWSIZE	(SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
#else
#define ALLOCCHUNK_RAWSIZE	(SIZEOF_SIZE_T + SIZEOF_VOID_P)
#endif							/* MEMORY_CONTEXT_CHECKING */

	/* ensure proper alignment by adding padding if needed */
#if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
	char		padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
#endif

	/* aset is the owning aset if allocated, or the freelist link if free */
	void	   *aset;
	/* there must not be any padding to reach a MAXALIGN boundary here! */
} AllocChunkData;

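/*
 * For example, on a typical 64-bit build (SIZEOF_SIZE_T = 8, SIZEOF_VOID_P
 * = 8, MAXIMUM_ALIGNOF = 8), ALLOCCHUNK_RAWSIZE is 16, or 24 with
 * MEMORY_CONTEXT_CHECKING; both are multiples of 8, so no padding is
 * emitted.  On a hypothetical 32-bit build with 8-byte maxalignment, the
 * checking build's raw size of 12 would get 4 bytes of padding so that
 * "aset" (and hence the payload) stays maxaligned.
 */
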
/*
 * Only the "aset" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind.  But note that chunk headers that are in a freelist are kept
 * accessible, for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(AllocChunkData, aset)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
	((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
	((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))

/*
 * Rather than repeatedly creating and deleting memory contexts, we keep some
 * freed contexts in freelists so that we can hand them out again with little
 * work.  Before putting a context in a freelist, we reset it so that it has
 * only its initial malloc chunk and no others.  To be a candidate for a
 * freelist, a context must have the same minContextSize/initBlockSize as
 * other contexts in the list; but its maxBlockSize is irrelevant since that
 * doesn't affect the size of the initial chunk.
 *
 * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
 * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
 * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
 *
 * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
 * hopes of improving locality of reference.  But if there get to be too
 * many contexts in the list, we'd prefer to drop the most-recently-created
 * contexts in hopes of keeping the process memory map compact.
 * We approximate that by simply deleting all existing entries when the list
 * overflows, on the assumption that queries that allocate a lot of contexts
 * will probably free them in more or less reverse order of allocation.
 *
 * Contexts in a freelist are chained via their nextchild pointers.
 */
#define MAX_FREE_CONTEXTS 100	/* arbitrary limit on freelist length */

typedef struct AllocSetFreeList
{
	int			num_free;		/* current list length */
	AllocSetContext *first_free;	/* list header */
} AllocSetFreeList;

/* context_freelists[0] is for default params, [1] for small params */
static AllocSetFreeList context_freelists[2] =
{
	{
		0, NULL
	},
	{
		0, NULL
	}
};

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, Size size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context,
						  MemoryStatsPrintFunc printfunc, void *passthru,
						  MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 */
static const MemoryContextMethods AllocSetMethods = {
	AllocSetAlloc,
	AllocSetFree,
	AllocSetRealloc,
	AllocSetReset,
	AllocSetDelete,
	AllocSetGetChunkSpace,
	AllocSetIsEmpty,
	AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
	,AllocSetCheck
#endif
};

/*
 * Table for AllocSetFreeIndex
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
	fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
			(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
	fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
			(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif

/* ----------
 * AllocSetFreeIndex -
 *
 *		Compute the index of the freechunk list of the alloc set that an
 *		allocation of the given size belongs to.  Caller must have verified
 *		that size <= ALLOC_CHUNK_LIMIT.
 * ----------
 */
static inline int
AllocSetFreeIndex(Size size)
{
	int			idx;
	unsigned int t,
				tsize;

	if (size > (1 << ALLOC_MINBITS))
	{
		tsize = (size - 1) >> ALLOC_MINBITS;

		/*
		 * At this point we need to obtain log2(tsize)+1, ie, the number of
		 * bits needed to represent tsize.  We used to do this with a
		 * shift-and-count loop, but this function is enough of a hotspot to
		 * justify micro-optimization effort.  The best approach seems to be
		 * to use a lookup table.  Note that this code assumes that
		 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
		 * the tsize value.
		 */
		t = tsize >> 8;
		idx = t ? LogTable256[t] + 8 : LogTable256[tsize];

		Assert(idx < ALLOCSET_NUM_FREELISTS);
	}
	else
		idx = 0;

	return idx;
}
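
/*
 * For example, for a 100-byte request: tsize = (100 - 1) >> 3 = 12, the
 * high byte is zero, and LogTable256[12] = 4, so the chunk comes from
 * freelist 4 (128-byte chunks).  For a 4000-byte request: tsize = 499,
 * t = 499 >> 8 = 1, and LogTable256[1] + 8 = 9, i.e. freelist 9
 * (4096-byte chunks).
 */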


/*
 * Public routines
 */


/*
 * AllocSetContextCreateInternal
 *		Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (must be statically allocated)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * Most callers should abstract the context size parameters using a macro
 * such as ALLOCSET_DEFAULT_SIZES.
 *
 * Note: don't call this directly; go through the wrapper macro
 * AllocSetContextCreate.
 */
MemoryContext
AllocSetContextCreateInternal(MemoryContext parent,
							  const char *name,
							  Size minContextSize,
							  Size initBlockSize,
							  Size maxBlockSize)
{
	int			freeListIndex;
	Size		firstBlockSize;
	AllocSet	set;
	AllocBlock	block;

	/* Assert we padded AllocChunkData properly */
	StaticAssertStmt(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
					 "sizeof(AllocChunkData) is not maxaligned");
	StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
					 ALLOC_CHUNKHDRSZ,
					 "padding calculation in AllocChunkData is wrong");

	/*
	 * First, validate allocation parameters.  Once these were regular
	 * runtime tests and elog's, but in practice Asserts seem sufficient
	 * because nobody varies their parameters at runtime.  We somewhat
	 * arbitrarily enforce a minimum 1K block size.
	 */
	Assert(initBlockSize == MAXALIGN(initBlockSize) &&
		   initBlockSize >= 1024);
	Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
		   maxBlockSize >= initBlockSize &&
		   AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
	Assert(minContextSize == 0 ||
		   (minContextSize == MAXALIGN(minContextSize) &&
			minContextSize >= 1024 &&
			minContextSize <= maxBlockSize));

	/*
	 * Check whether the parameters match either available freelist.  We do
	 * not need to demand a match of maxBlockSize.
	 */
	if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
		initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
		freeListIndex = 0;
	else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
			 initBlockSize == ALLOCSET_SMALL_INITSIZE)
		freeListIndex = 1;
	else
		freeListIndex = -1;

	/*
	 * If a suitable freelist entry exists, just recycle that context.
	 */
	if (freeListIndex >= 0)
	{
		AllocSetFreeList *freelist = &context_freelists[freeListIndex];

		if (freelist->first_free != NULL)
		{
			/* Remove entry from freelist */
			set = freelist->first_free;
			freelist->first_free = (AllocSet) set->header.nextchild;
			freelist->num_free--;

			/* Update its maxBlockSize; everything else should be OK */
			set->maxBlockSize = maxBlockSize;

			/* Reinitialize its header, installing correct name and parent */
			MemoryContextCreate((MemoryContext) set,
								T_AllocSetContext,
								&AllocSetMethods,
								parent,
								name);

			return (MemoryContext) set;
		}
	}

	/* Determine size of initial block */
	firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
		ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	if (minContextSize != 0)
		firstBlockSize = Max(firstBlockSize, minContextSize);
	else
		firstBlockSize = Max(firstBlockSize, initBlockSize);

	/*
	 * Allocate the initial block.  Unlike other aset.c blocks, it starts
	 * with the context header and its block header follows that.
	 */
	set = (AllocSet) malloc(firstBlockSize);
	if (set == NULL)
	{
		if (TopMemoryContext)
			MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed while creating memory context \"%s\".",
						   name)));
	}

	/*
	 * Avoid writing code that can fail between here and MemoryContextCreate;
	 * we'd leak the header/initial block if we ereport in this stretch.
	 */

	/* Fill in the initial block's block header */
	block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
	block->aset = set;
	block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
	block->endptr = ((char *) set) + firstBlockSize;
	block->prev = NULL;
	block->next = NULL;

	/* Mark unallocated space NOACCESS; leave the block header alone. */
	VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);

	/* Remember block as part of block list */
	set->blocks = block;
	/* Mark block as not to be released at reset time */
	set->keeper = block;

	/* Finish filling in aset-specific parts of the context header */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	set->initBlockSize = initBlockSize;
	set->maxBlockSize = maxBlockSize;
	set->nextBlockSize = initBlockSize;
	set->freeListIndex = freeListIndex;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk
	 * size limit will be at most 1/8th maxBlockSize, so that given a stream
	 * of requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * We have to have allocChunkLimit a power of two, because the requested
	 * and actually-allocated sizes of any chunk must be on the same side of
	 * the limit, else we get confused about whether the chunk is "big".
	 *
	 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
	 */
	StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
					 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;
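
	/*
	 * For example, with an 8K maxBlockSize and (on a typical 64-bit build)
	 * block/chunk headers of a few dozen bytes, the limit starts at 8192
	 * and is halved until a limit-sized chunk plus its header fits in 1/4th
	 * of the block's usable space, ending up at 1024.  The exact result
	 * depends on the platform's header sizes.
	 */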

	/* Finally, do the type-independent part of context creation */
	MemoryContextCreate((MemoryContext) set,
						T_AllocSetContext,
						&AllocSetMethods,
						parent,
						name);

	return (MemoryContext) set;
}
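
/*
 * A typical caller goes through the AllocSetContextCreate wrapper macro
 * rather than this function; a sketch of the usual pattern (the context
 * name here is just illustrative):
 *
 *		cxt = AllocSetContextCreate(CurrentMemoryContext,
 *									"my working data",
 *									ALLOCSET_DEFAULT_SIZES);
 *
 * where ALLOCSET_DEFAULT_SIZES expands to the standard minContextSize,
 * initBlockSize, and maxBlockSize triple from memutils.h.
 */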

/*
 * AllocSetReset
 *		Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we give back all but the "keeper" block (which we must keep, since
 * it shares a malloc chunk with the context header).  In this way, we don't
 * thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list will be just the keeper block */
	set->blocks = set->keeper;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (block == set->keeper)
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->prev = NULL;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif
			free(block);
		}
		block = next;
	}

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}
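
/*
 * This cheap reset path is what makes the common per-tuple pattern viable;
 * a sketch of such a caller:
 *
 *		for each tuple:
 *			MemoryContextReset(per_tuple_context);
 *			... palloc() scratch data freely ...
 *
 * Each iteration's small allocations are typically satisfied from the
 * keeper block, so no malloc()/free() traffic occurs at all.
 */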

/*
 * AllocSetDelete
 *		Frees all memory which is allocated in the given set,
 *		in preparation for deletion of the set.
 *
 * Unlike AllocSetReset, this *must* free all resources of the set.
 */
static void
AllocSetDelete(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block = set->blocks;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/*
	 * If the context is a candidate for a freelist, put it into that
	 * freelist instead of destroying it.
	 */
	if (set->freeListIndex >= 0)
	{
		AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];

		/*
		 * Reset the context, if it needs it, so that we aren't hanging on to
		 * more than the initial malloc chunk.
		 */
		if (!context->isReset)
			MemoryContextResetOnly(context);

		/*
		 * If the freelist is full, just discard what's already in it.  See
		 * comments with context_freelists[].
		 */
		if (freelist->num_free >= MAX_FREE_CONTEXTS)
		{
			while (freelist->first_free != NULL)
			{
				AllocSetContext *oldset = freelist->first_free;

				freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
				freelist->num_free--;

				/* All that remains is to free the header/initial block */
				free(oldset);
			}
			Assert(freelist->num_free == 0);
		}

		/* Now add the just-deleted context to the freelist. */
		set->header.nextchild = (MemoryContext) freelist->first_free;
		freelist->first_free = set;
		freelist->num_free++;

		return;
	}

	/* Free all blocks, except the keeper which is part of context header */
	while (block != NULL)
	{
		AllocBlock	next = block->next;

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif

		if (block != set->keeper)
			free(block);

		block = next;
	}

	/* Finally, free the context header, including the keeper block */
	free(set);
}

/*
 * AllocSetAlloc
 *		Returns pointer to allocated memory of given size or NULL if
 *		request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 *
 * Note: when using valgrind, it doesn't matter how the returned allocation
 * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
 */
static void *
AllocSetAlloc(MemoryContext context, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	AllocChunk	chunk;
	int			fidx;
	Size		chunk_size;
	Size		blksize;

	AssertArg(AllocSetIsValid(set));

	/*
	 * If requested size exceeds maximum for chunks, allocate an entire block
	 * for this request.
	 */
	if (size > set->allocChunkLimit)
	{
		chunk_size = MAXALIGN(size);
		blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
			return NULL;
		block->aset = set;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		chunk->aset = set;
		chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < chunk_size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		/*
		 * Stick the new block underneath the active allocation block, if
		 * any, so that we don't lose the use of the space remaining therein.
		 */
		if (set->blocks != NULL)
		{
			block->prev = set->blocks;
			block->next = set->blocks->next;
			if (block->next)
				block->next->prev = block;
			set->blocks->next = block;
		}
		else
		{
			block->prev = NULL;
			block->next = NULL;
			set->blocks = block;
		}

		AllocAllocInfo(set, chunk);

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
								   chunk_size - size);

		/* Disallow external access to private part of chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could
	 * reuse.  If one is found, remove it from the free list, make it again a
	 * member of the alloc set and return its data address.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		Assert(chunk->size >= size);

		set->freelist[fidx] = (AllocChunk) chunk->aset;

		chunk->aset = (void *) set;

#ifdef MEMORY_CONTEXT_CHECKING
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		AllocAllocInfo(set, chunk);

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
								   chunk->size - size);

		/* Disallow external access to private part of chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = (1 << ALLOC_MINBITS) << fidx;
	Assert(chunk_size >= size);

	/*
	 * If there is enough room in the active allocation block, we will put
	 * the chunk into that block.  Else must start a new one.
	 */
	if ((block = set->blocks) != NULL)
	{
		Size		availspace = block->endptr - block->freeptr;

		if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
		{
			/*
			 * The existing active (top) block does not have enough room for
			 * the requested allocation, but it might still have a useful
			 * amount of space in it.  Once we push it down in the block
			 * list, we'll never try to allocate more space from it.  So,
			 * before we do that, carve up its free space into chunks that we
			 * can put on the set's freelists.
			 *
			 * Because we can only get here when there's less than
			 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
			 * more than ALLOCSET_NUM_FREELISTS-1 times.
			 */
			while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
			{
				Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
				int			a_fidx = AllocSetFreeIndex(availchunk);

				/*
				 * In most cases, we'll get back the index of the next larger
				 * freelist than the one we need to put this chunk on.  The
				 * exception is when availchunk is exactly a power of 2.
				 */
				if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
				{
					a_fidx--;
					Assert(a_fidx >= 0);
					availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
				}

				chunk = (AllocChunk) (block->freeptr);

				/* Prepare to initialize the chunk header. */
				VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

				block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
				availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

				chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
				chunk->requested_size = 0;	/* mark it free */
#endif
				chunk->aset = (void *) set->freelist[a_fidx];
				set->freelist[a_fidx] = chunk;
			}

			/* Mark that we need to create a new block */
			block = NULL;
		}
	}

	/*
	 * Time to create a new regular (multi-chunk) block?
	 */
	if (block == NULL)
	{
		Size		required_size;

		/*
		 * The first such block has size initBlockSize, and we double the
		 * space in each succeeding block, but not more than maxBlockSize.
		 */
		blksize = set->nextBlockSize;
		set->nextBlockSize <<= 1;
		if (set->nextBlockSize > set->maxBlockSize)
			set->nextBlockSize = set->maxBlockSize;

		/*
		 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need
		 * more space... but try to keep it a power of 2.
		 */
		required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		while (blksize < required_size)
			blksize <<= 1;

		/* Try to allocate it */
		block = (AllocBlock) malloc(blksize);

		/*
		 * We could be asking for pretty big blocks here, so cope if malloc
		 * fails.  But give up if there's less than a meg or so available...
		 */
		while (block == NULL && blksize > 1024 * 1024)
		{
			blksize >>= 1;
			if (blksize < required_size)
				break;
			block = (AllocBlock) malloc(blksize);
		}

		if (block == NULL)
			return NULL;

		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;

		/* Mark unallocated space NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);

		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
	}

	/*
	 * OK, do the allocation
	 */
	chunk = (AllocChunk) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	chunk->aset = (void *) set;
	chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	if (size < chunk->size)
		set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

	AllocAllocInfo(set, chunk);

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow external access to private part of chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

	return AllocChunkGetPointer(chunk);
}
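
/*
 * To trace a concrete case: in a fresh ALLOCSET_DEFAULT_SIZES context, a
 * palloc(100) maps to freelist 4, finds it empty, rounds the chunk up to
 * 128 bytes, and carves header + chunk out of the keeper block's free
 * space.  If that chunk is later pfree'd, the next palloc() of 65..128
 * bytes gets it back from freelist 4 without touching malloc().
 */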

/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 */
static void
AllocSetFree(MemoryContext context, void *pointer)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	/* Allow access to private part of chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

	AllocFreeInfo(set, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < chunk->size)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (chunk->size > set->allocChunkLimit)
	{
		/*
		 * Big chunks are certain to have been allocated as single-chunk
		 * blocks.  Just unlink that block and return it to malloc().
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* OK, remove block from aset's list and free it */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;
#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
	}
	else
	{
		/* Normal case, put the chunk into appropriate freelist */
		int			fidx = AllocSetFreeIndex(chunk->size);

		chunk->aset = (void *) set->freelist[fidx];

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, chunk->size);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* Reset requested_size to 0 in chunks that are on freelist */
		chunk->requested_size = 0;
#endif
		set->freelist[fidx] = chunk;
	}
}
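
/*
 * Continuing the example above: pfree() of a 100-byte allocation pushes its
 * 128-byte chunk onto freelist 4, whereas pfree() of a 10000-byte
 * allocation (chunk->size > allocChunkLimit) unlinks its dedicated block
 * and free()s it immediately, returning the memory to the OS allocator.
 */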

/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false
 * negatives.  (In principle, we could use VALGRIND_GET_VBITS() to rediscover
 * the old request size.)
 */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		oldsize;

	/* Allow access to private part of chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

	oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldsize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc().  Maybe the
	 * allocated area already is >= the new size.  (In particular, we always
	 * fall out here if the requested size is a decrease.)
	 */
	if (oldsize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldsize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		/* Disallow external access to private part of chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

		return pointer;
	}

	if (oldsize > set->allocChunkLimit)
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger with minimum space
		 * wastage.
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
		Size		chksize;
		Size		blksize;

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* Do the realloc */
		chksize = MAXALIGN(size);
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
		{
			/* Disallow external access to private part of chunk header. */
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
			return NULL;
		}
		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = AllocChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;
		if (block->next)
			block->next->prev = block;
		chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		randomize_mem((char *) pointer + chunk->requested_size,
					  size - chunk->requested_size);
#endif

		/*
		 * realloc() (or randomize_mem()) will have left the newly-allocated
		 * part UNDEFINED, but we may need to adjust trailing bytes from the
		 * old allocation.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
									oldsize - chunk->requested_size);

		chunk->requested_size = size;

		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't know how much of the old chunk size was the actual
		 * allocation; it could have been as small as one byte.  We have to
		 * be conservative and just mark the entire old portion DEFINED.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		/* Disallow external access to private part of chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

		return pointer;
	}
	else
	{
		/*
		 * Small-chunk case.  We just do this by brute force, ie, allocate a
		 * new chunk and copy the data.  Since we know the existing data
		 * isn't huge, this won't involve any great memcpy expense, so it's
		 * not worth being smarter.  (At one time we tried to avoid memcpy
		 * when it was possible to enlarge the chunk in-place, but that turns
		 * out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we leak
		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
		 */
		AllocPointer newPointer;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
		{
			/* Disallow external access to private part of chunk header. */
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
			return NULL;
		}

		/*
		 * AllocSetAlloc() may have returned a region that is still NOACCESS.
		 * Change it to UNDEFINED for the moment; memcpy() will then transfer
		 * definedness from the old allocation to the new.  If we know the
		 * old allocation, copy just that much.  Otherwise, make the entire
		 * old chunk defined to avoid errors as we copy the currently-
		 * NOACCESS trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree((MemoryContext) set, pointer);

		return newPointer;
	}
}
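
/*
 * The large-chunk realloc() path above is what makes the common "grow a
 * buffer by repeated repalloc()" pattern cheap; a sketch of such a caller:
 *
 *		buf = palloc(len);
 *		...
 *		buf = repalloc(buf, len * 2);
 *
 * Once len exceeds allocChunkLimit the buffer lives in its own block, so
 * each doubling is a realloc() of that block rather than an
 * allocate-copy-free cycle through the freelists.
 */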

/*
 * AllocSetGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
static Size
AllocSetGetChunkSpace(MemoryContext context, void *pointer)
{
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		result;

	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
	result = chunk->size + ALLOC_CHUNKHDRSZ;
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
	return result;
}

/*
 * AllocSetIsEmpty
 *		Is an allocset empty of any allocated space?
 */
static bool
AllocSetIsEmpty(MemoryContext context)
{
	/*
	 * For now, we say "empty" only if the context is new or just reset.  We
	 * could examine the freelists to determine if all space has been freed,
	 * but it's not really worth the trouble for present uses of this
	 * functionality.
	 */
	if (context->isReset)
		return true;
	return false;
}

/*
 * AllocSetStats
 *		Compute stats about memory consumption of an allocset.
 *
 * printfunc: if not NULL, pass a human-readable stats string to this.
 * passthru: pass this pointer through to printfunc.
 * totals: if not NULL, add stats about this context into *totals.
 */
static void
AllocSetStats(MemoryContext context,
			  MemoryStatsPrintFunc printfunc, void *passthru,
			  MemoryContextCounters *totals)
{
	AllocSet	set = (AllocSet) context;
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace;
	Size		freespace = 0;
	AllocBlock	block;
	int			fidx;

	/* Include context header in totalspace */
	totalspace = MAXALIGN(sizeof(AllocSetContext));

	for (block = set->blocks; block != NULL; block = block->next)
	{
		nblocks++;
		totalspace += block->endptr - ((char *) block);
		freespace += block->endptr - block->freeptr;
	}
	for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
	{
		AllocChunk	chunk;

		for (chunk = set->freelist[fidx]; chunk != NULL;
			 chunk = (AllocChunk) chunk->aset)
		{
			freechunks++;
			freespace += chunk->size + ALLOC_CHUNKHDRSZ;
		}
	}

	if (printfunc)
	{
		char		stats_string[200];

		snprintf(stats_string, sizeof(stats_string),
				 "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
				 totalspace, nblocks, freespace, freechunks,
				 totalspace - freespace);
		printfunc(context, passthru, stats_string);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	const char *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;

		/*
		 * An empty block can only be the keeper block
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			AllocChunk	chunk = (AllocChunk) bpoz;
			Size		chsize,
						dsize;

			/* Allow access to private part of chunk header. */
			VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

			chsize = chunk->size;	/* aligned chunk size */
			dsize = chunk->requested_size; /* real data */

			/*
			 * Check chunk size
			 */
			if (dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/* single-chunk block? */
			if (chsize > set->allocChunkLimit &&
				chsize + ALLOC_CHUNKHDRSZ != blk_used)
				elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
					 name, chunk, block);

			/*
			 * If chunk is allocated, check for correct aset pointer. (If
			 * it's free, the aset is the freelist pointer, which we can't
			 * check as easily...)  Note this is an incomplete test, since
			 * palloc(0) produces an allocated chunk with requested_size ==
			 * 0.
			 */
			if (dsize > 0 && chunk->aset != (void *) set)
				elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * Check for overwrite of padding space in an allocated chunk.
			 */
			if (chunk->aset == (void *) set && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * If chunk is allocated, disallow external access to private
			 * part of chunk header.
			 */
			if (chunk->aset == (void *) set)
				VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);
	}
}

#endif							/* MEMORY_CONTEXT_CHECKING */