/*-------------------------------------------------------------------------
 *
 * generation.c
 *    Generational allocator definitions.
 *
 * Generation is a custom MemoryContext implementation designed for cases of
 * chunks with similar lifespan.
 *
 * Portions Copyright (c) 2017-2019, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *    src/backend/utils/mmgr/generation.c
 *
 *
 * This memory context is based on the assumption that the chunks are freed
 * roughly in the same order as they were allocated (FIFO), or in groups with
 * similar lifespan (generations - hence the name of the context). This is
 * typical for various queue-like use cases, e.g. when tuples are constructed,
 * processed and then thrown away.
 *
 * The memory context uses a very simple approach to free space management.
 * Instead of a complex global freelist, each block keeps track of the number
 * of allocated and freed chunks. Freed chunks are not reused, and once all
 * chunks in a block are freed, the whole block is thrown away. When the
 * chunks allocated in the same block have similar lifespan, this works
 * very well and is very cheap.
 *
 * The current implementation uses only a fixed block size - maybe it should
 * adapt a min/max block size range, and grow the blocks automatically.
 * It already uses dedicated blocks for oversized chunks.
 *
 * XXX It might be possible to improve this by keeping a small freelist for
 * only a small number of recent blocks, but it's not clear it's worth the
 * additional complexity.
 *
 *-------------------------------------------------------------------------
 */
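
/*
 * A minimal caller-side sketch of the intended usage pattern (illustrative
 * only; the context name, block size, and variable names below are arbitrary
 * examples, not taken from any actual caller):
 *
 *     MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
 *                                                 "example tuples",
 *                                                 8 * 1024);
 *     MemoryContext old = MemoryContextSwitchTo(gen);
 *
 *     for (i = 0; i < ntuples; i++)
 *     {
 *         void   *tup = palloc(tuple_size);    allocated from the current block
 *
 *         ... process tup ...
 *
 *         pfree(tup);                          only bumps the block's free count
 *     }
 *
 *     MemoryContextSwitchTo(old);
 *     MemoryContextDelete(gen);                releases any remaining blocks
 *
 * Because the chunks die in roughly the order they were created, whole blocks
 * become empty and are returned to malloc() without any freelist management.
 */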

#include "postgres.h"

#include "lib/ilist.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"


#define Generation_BLOCKHDRSZ    MAXALIGN(sizeof(GenerationBlock))
#define Generation_CHUNKHDRSZ    sizeof(GenerationChunk)

typedef struct GenerationBlock GenerationBlock; /* forward reference */
typedef struct GenerationChunk GenerationChunk;

typedef void *GenerationPointer;

/*
 * GenerationContext is a simple memory context not reusing allocated chunks,
 * and freeing blocks once all chunks are freed.
 */
typedef struct GenerationContext
{
    MemoryContextData header;   /* Standard memory-context fields */

    /* Generational context parameters */
    Size        blockSize;      /* standard block size */

    GenerationBlock *block;     /* current (most recently allocated) block */
    dlist_head  blocks;         /* list of blocks */
} GenerationContext;

/*
 * GenerationBlock
 *    GenerationBlock is the unit of memory that is obtained by generation.c
 *    from malloc(). It contains one or more GenerationChunks, which are
 *    the units requested by palloc() and freed by pfree(). GenerationChunks
 *    cannot be returned to malloc() individually; instead, pfree() updates
 *    the free counter of the block, and when all chunks in a block are free,
 *    the whole block is returned to malloc().
 *
 * GenerationBlock is the header data for a block --- the usable space
 * within the block begins at the next alignment boundary.
 */
struct GenerationBlock
{
    dlist_node  node;           /* doubly-linked list of blocks */
    Size        blksize;        /* allocated size of this block */
    int         nchunks;        /* number of chunks in the block */
    int         nfree;          /* number of free chunks */
    char       *freeptr;        /* start of free space in this block */
    char       *endptr;         /* end of space in this block */
};
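
/*
 * Hedged sketch of the bookkeeping the header fields imply (the helper below
 * is hypothetical and not used anywhere in this file; it exists only to make
 * the invariant explicit):
 *
 *     static inline Size
 *     GenerationBlockFreeBytes(const GenerationBlock *block)
 *     {
 *         return (Size) (block->endptr - block->freeptr);
 *     }
 *
 * GenerationAlloc() advances freeptr and increments nchunks; GenerationFree()
 * only increments nfree.  The block is returned to malloc() exactly when
 * nfree == nchunks, so no per-chunk freelist is required.
 */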

/*
 * GenerationChunk
 *    The prefix of each piece of memory in a GenerationBlock
 *
 * Note: to meet the memory context APIs, the payload area of the chunk must
 * be maxaligned, and the "context" link must be immediately adjacent to the
 * payload area (cf. GetMemoryChunkContext). We simplify matters for this
 * module by requiring sizeof(GenerationChunk) to be maxaligned, and then
 * we can ensure things work by adding any required alignment padding before
 * the pointer fields. There is a static assertion below that the alignment
 * is done correctly.
 */
struct GenerationChunk
{
    /* size is always the size of the usable space in the chunk */
    Size        size;
#ifdef MEMORY_CONTEXT_CHECKING
    /* when debugging memory usage, also store actual requested size */
    /* this is zero in a free chunk */
    Size        requested_size;

#define GENERATIONCHUNK_RAWSIZE  (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P * 2)
#else
#define GENERATIONCHUNK_RAWSIZE  (SIZEOF_SIZE_T + SIZEOF_VOID_P * 2)
#endif                          /* MEMORY_CONTEXT_CHECKING */

    /* ensure proper alignment by adding padding if needed */
#if (GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
    char        padding[MAXIMUM_ALIGNOF - GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
#endif

    GenerationBlock *block;     /* block owning this chunk */
    GenerationContext *context; /* owning context, or NULL if freed chunk */
    /* there must not be any padding to reach a MAXALIGN boundary here! */
};
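
/*
 * Worked example of the padding arithmetic above (the platform numbers are
 * illustrative assumptions, not requirements):
 *
 * - Typical 64-bit build with MEMORY_CONTEXT_CHECKING defined:
 *   SIZEOF_SIZE_T = 8, SIZEOF_VOID_P = 8, MAXIMUM_ALIGNOF = 8, so
 *   GENERATIONCHUNK_RAWSIZE = 8 * 2 + 8 * 2 = 32, 32 % 8 == 0, no padding.
 *
 * - Hypothetical 32-bit build without MEMORY_CONTEXT_CHECKING but with
 *   MAXIMUM_ALIGNOF = 8 (8-byte double alignment):
 *   GENERATIONCHUNK_RAWSIZE = 4 + 4 * 2 = 12, 12 % 8 == 4, so a
 *   padding[8 - 4] = padding[4] member is inserted before the pointer
 *   fields, keeping sizeof(GenerationChunk) maxaligned as the static
 *   assertions in GenerationContextCreate() require.
 */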

/*
 * Only the "context" field should be accessed outside this module.
 * We keep the rest of an allocated chunk's header marked NOACCESS when using
 * valgrind. But note that freed chunk headers are kept accessible, for
 * simplicity.
 */
#define GENERATIONCHUNK_PRIVATE_LEN offsetof(GenerationChunk, context)

/*
 * GenerationIsValid
 *    True iff set is a valid generation set.
 */
#define GenerationIsValid(set) PointerIsValid(set)

#define GenerationPointerGetChunk(ptr) \
    ((GenerationChunk *)(((char *)(ptr)) - Generation_CHUNKHDRSZ))
#define GenerationChunkGetPointer(chk) \
    ((GenerationPointer *)(((char *)(chk)) + Generation_CHUNKHDRSZ))
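
/*
 * These two macros are inverses: the user-visible pointer sits exactly
 * Generation_CHUNKHDRSZ bytes past the chunk header.  A short sketch of the
 * round trip (the variable names are illustrative only):
 *
 *     GenerationChunk *chunk = (GenerationChunk *) block->freeptr;
 *     void            *ptr = GenerationChunkGetPointer(chunk);
 *
 *     Assert(GenerationPointerGetChunk(ptr) == chunk);
 *
 * This layout, together with "context" being the last header field, is what
 * lets GetMemoryChunkContext() find the owning context by looking just
 * before the payload.
 */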

/*
 * These functions implement the MemoryContext API for Generation contexts.
 */
static void *GenerationAlloc(MemoryContext context, Size size);
static void GenerationFree(MemoryContext context, void *pointer);
static void *GenerationRealloc(MemoryContext context, void *pointer, Size size);
static void GenerationReset(MemoryContext context);
static void GenerationDelete(MemoryContext context);
static Size GenerationGetChunkSpace(MemoryContext context, void *pointer);
static bool GenerationIsEmpty(MemoryContext context);
static void GenerationStats(MemoryContext context,
                            MemoryStatsPrintFunc printfunc, void *passthru,
                            MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void GenerationCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for Generation contexts.
 */
static const MemoryContextMethods GenerationMethods = {
    GenerationAlloc,
    GenerationFree,
    GenerationRealloc,
    GenerationReset,
    GenerationDelete,
    GenerationGetChunkSpace,
    GenerationIsEmpty,
    GenerationStats
#ifdef MEMORY_CONTEXT_CHECKING
    ,GenerationCheck
#endif
};
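
/*
 * Nothing outside this file calls these functions by name; the generic code
 * in mcxt.c dispatches through this table.  Roughly (a simplified sketch of
 * the generic path, not a verbatim quote of mcxt.c):
 *
 *     void *
 *     palloc(Size size)
 *     {
 *         MemoryContext context = CurrentMemoryContext;
 *
 *         ... size checks and error handling ...
 *
 *         return context->methods->alloc(context, size);
 *     }
 *
 * so a Generation context participates in palloc/pfree/repalloc without any
 * special casing by callers.
 */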

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define GenerationFreeInfo(_cxt, _chunk) \
    fprintf(stderr, "GenerationFree: %s: %p, %lu\n", \
            (_cxt)->name, (_chunk), (_chunk)->size)
#define GenerationAllocInfo(_cxt, _chunk) \
    fprintf(stderr, "GenerationAlloc: %s: %p, %lu\n", \
            (_cxt)->name, (_chunk), (_chunk)->size)
#else
#define GenerationFreeInfo(_cxt, _chunk)
#define GenerationAllocInfo(_cxt, _chunk)
#endif


/*
 * Public routines
 */


/*
 * GenerationContextCreate
 *    Create a new Generation context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (must be statically allocated)
 * blockSize: generation block size
 */
MemoryContext
GenerationContextCreate(MemoryContext parent,
                        const char *name,
                        Size blockSize)
{
    GenerationContext *set;

    /* Assert we padded GenerationChunk properly */
    StaticAssertStmt(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
                     "sizeof(GenerationChunk) is not maxaligned");
    StaticAssertStmt(offsetof(GenerationChunk, context) + sizeof(MemoryContext) ==
                     Generation_CHUNKHDRSZ,
                     "padding calculation in GenerationChunk is wrong");

    /*
     * First, validate allocation parameters. (If we're going to throw an
     * error, we should do so before the context is created, not after.) We
     * somewhat arbitrarily enforce a minimum 1K block size, mostly because
     * that's what AllocSet does.
     */
    if (blockSize != MAXALIGN(blockSize) ||
        blockSize < 1024 ||
        !AllocHugeSizeIsValid(blockSize))
        elog(ERROR, "invalid blockSize for memory context: %zu",
             blockSize);

    /*
     * Allocate the context header. Unlike aset.c, we never try to combine
     * this with the first regular block, since that would prevent us from
     * freeing the first generation of allocations.
     */

    set = (GenerationContext *) malloc(MAXALIGN(sizeof(GenerationContext)));
    if (set == NULL)
    {
        MemoryContextStats(TopMemoryContext);
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory"),
                 errdetail("Failed while creating memory context \"%s\".",
                           name)));
    }

    /*
     * Avoid writing code that can fail between here and MemoryContextCreate;
     * we'd leak the header if we ereport in this stretch.
     */

    /* Fill in GenerationContext-specific header fields */
    set->blockSize = blockSize;
    set->block = NULL;
    dlist_init(&set->blocks);

    /* Finally, do the type-independent part of context creation */
    MemoryContextCreate((MemoryContext) set,
                        T_GenerationContext,
                        &GenerationMethods,
                        parent,
                        name);

    return (MemoryContext) set;
}
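
/*
 * Worked example of the blockSize validation above (assuming 8-byte
 * MAXALIGN; the concrete values are illustrative):
 *
 *     blockSize = 8192  - multiple of 8 and >= 1024, accepted
 *     blockSize = 1000  - fails the minimum-size check (< 1024)
 *     blockSize = 8190  - not a multiple of MAXIMUM_ALIGNOF, fails the
 *                         MAXALIGN check
 *
 * Invalid values are rejected with elog(ERROR) before any memory has been
 * allocated for the context.
 */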

/*
 * GenerationReset
 *    Frees all memory which is allocated in the given set.
 *
 * The code simply frees all the blocks in the context - we don't keep any
 * keeper blocks or anything like that.
 */
static void
GenerationReset(MemoryContext context)
{
    GenerationContext *set = (GenerationContext *) context;
    dlist_mutable_iter miter;

    AssertArg(GenerationIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
    /* Check for corruption and leaks before freeing */
    GenerationCheck(context);
#endif

    dlist_foreach_modify(miter, &set->blocks)
    {
        GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);

        dlist_delete(miter.cur);

#ifdef CLOBBER_FREED_MEMORY
        wipe_mem(block, block->blksize);
#endif

        free(block);
    }

    set->block = NULL;

    Assert(dlist_is_empty(&set->blocks));
}

/*
 * GenerationDelete
 *    Free all memory which is allocated in the given context.
 */
static void
GenerationDelete(MemoryContext context)
{
    /* Reset to release all the GenerationBlocks */
    GenerationReset(context);
    /* And free the context header */
    free(context);
}

/*
 * GenerationAlloc
 *    Returns pointer to allocated memory of given size or NULL if
 *    request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *    MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
 * All callers use a much-lower limit.
 *
 * Note: when using valgrind, it doesn't matter how the returned allocation
 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
 * return space that is marked NOACCESS - GenerationRealloc has to beware!
 */
static void *
GenerationAlloc(MemoryContext context, Size size)
{
    GenerationContext *set = (GenerationContext *) context;
    GenerationBlock *block;
    GenerationChunk *chunk;
    Size        chunk_size = MAXALIGN(size);

    /* is it an over-sized chunk? if yes, allocate special block */
    if (chunk_size > set->blockSize / 8)
    {
        Size        blksize = chunk_size + Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;

        block = (GenerationBlock *) malloc(blksize);
        if (block == NULL)
            return NULL;

        /* block with a single (used) chunk */
        block->blksize = blksize;
        block->nchunks = 1;
        block->nfree = 0;

        /* the block is completely full */
        block->freeptr = block->endptr = ((char *) block) + blksize;

        chunk = (GenerationChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
        chunk->block = block;
        chunk->context = set;
        chunk->size = chunk_size;

#ifdef MEMORY_CONTEXT_CHECKING
        chunk->requested_size = size;
        /* set mark to catch clobber of "unused" space */
        if (size < chunk_size)
            set_sentinel(GenerationChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
        /* fill the allocated space with junk */
        randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
#endif

        /* add the block to the list of allocated blocks */
        dlist_push_head(&set->blocks, &block->node);

        GenerationAllocInfo(set, chunk);

        /* Ensure any padding bytes are marked NOACCESS. */
        VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
                                   chunk_size - size);

        /* Disallow external access to private part of chunk header. */
        VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);

        return GenerationChunkGetPointer(chunk);
    }

    /*
     * Not an over-sized chunk. Is there enough space in the current block? If
     * not, allocate a new "regular" block.
     */
    block = set->block;

    if ((block == NULL) ||
        (block->endptr - block->freeptr) < Generation_CHUNKHDRSZ + chunk_size)
    {
        Size        blksize = set->blockSize;

        block = (GenerationBlock *) malloc(blksize);

        if (block == NULL)
            return NULL;

        block->blksize = blksize;
        block->nchunks = 0;
        block->nfree = 0;

        block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
        block->endptr = ((char *) block) + blksize;

        /* Mark unallocated space NOACCESS. */
        VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
                                   blksize - Generation_BLOCKHDRSZ);

        /* add it to the doubly-linked list of blocks */
        dlist_push_head(&set->blocks, &block->node);

        /* and also use it as the current allocation block */
        set->block = block;
    }

    /* we're supposed to have a block with enough free space now */
    Assert(block != NULL);
    Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);

    chunk = (GenerationChunk *) block->freeptr;

    /* Prepare to initialize the chunk header. */
    VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);

    block->nchunks += 1;
    block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);

    Assert(block->freeptr <= block->endptr);

    chunk->block = block;
    chunk->context = set;
    chunk->size = chunk_size;

#ifdef MEMORY_CONTEXT_CHECKING
    chunk->requested_size = size;
    /* set mark to catch clobber of "unused" space */
    if (size < chunk->size)
        set_sentinel(GenerationChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
    /* fill the allocated space with junk */
    randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
#endif

    GenerationAllocInfo(set, chunk);

    /* Ensure any padding bytes are marked NOACCESS. */
    VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
                               chunk_size - size);

    /* Disallow external access to private part of chunk header. */
    VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);

    return GenerationChunkGetPointer(chunk);
}
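
/*
 * Worked example of the sizing policy above (an 8 kB blockSize and 8-byte
 * MAXALIGN are assumed purely for illustration):
 *
 *     blockSize = 8192, oversize threshold = blockSize / 8 = 1024
 *
 *     palloc(100)   ->  chunk_size = MAXALIGN(100) = 104, carved out of the
 *                       current 8 kB block (a new one is malloc'd if the
 *                       current block lacks 104 + Generation_CHUNKHDRSZ bytes)
 *     palloc(5000)  ->  chunk_size = 5000 > 1024, so a dedicated block of
 *                       5000 + Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ
 *                       bytes is malloc'd; it is pushed onto the block list
 *                       but never becomes the "current" allocation block
 */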

/*
 * GenerationFree
 *    Update number of chunks in the block, and if all chunks in the block
 *    are now free then discard the block.
 */
static void
GenerationFree(MemoryContext context, void *pointer)
{
    GenerationContext *set = (GenerationContext *) context;
    GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
    GenerationBlock *block;

    /* Allow access to private part of chunk header. */
    VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);

    block = chunk->block;

#ifdef MEMORY_CONTEXT_CHECKING
    /* Test for someone scribbling on unused space in chunk */
    if (chunk->requested_size < chunk->size)
        if (!sentinel_ok(pointer, chunk->requested_size))
            elog(WARNING, "detected write past chunk end in %s %p",
                 ((MemoryContext) set)->name, chunk);
#endif

#ifdef CLOBBER_FREED_MEMORY
    wipe_mem(pointer, chunk->size);
#endif

    /* Reset context to NULL in freed chunks */
    chunk->context = NULL;

#ifdef MEMORY_CONTEXT_CHECKING
    /* Reset requested_size to 0 in freed chunks */
    chunk->requested_size = 0;
#endif

    block->nfree += 1;

    Assert(block->nchunks > 0);
    Assert(block->nfree <= block->nchunks);

    /* If there are still allocated chunks in the block, we're done. */
    if (block->nfree < block->nchunks)
        return;

    /*
     * The block is empty, so let's get rid of it. First remove it from the
     * list of blocks, then return it to malloc().
     */
    dlist_delete(&block->node);

    /* Also make sure the block is not marked as the current block. */
    if (set->block == block)
        set->block = NULL;

    free(block);
}
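
/*
 * A small illustrative trace of the counters for one regular block holding
 * three chunks (purely an example):
 *
 *     palloc x 3   ->  nchunks = 3, nfree = 0
 *     pfree(a)     ->  nchunks = 3, nfree = 1   (chunk memory not reused)
 *     pfree(b)     ->  nchunks = 3, nfree = 2
 *     pfree(c)     ->  nchunks = 3, nfree = 3   ->  block unlinked and free()d
 *
 * Note that freeing the last chunk may release the block even while it is the
 * context's current allocation block; set->block is reset to NULL, so the
 * next GenerationAlloc() simply starts a fresh block.
 */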

/*
 * GenerationRealloc
 *    When handling repalloc, we simply allocate a new chunk, copy the data
 *    and discard the old one. The only exception is when the new size fits
 *    into the old chunk - in that case we just update chunk header.
 */
static void *
GenerationRealloc(MemoryContext context, void *pointer, Size size)
{
    GenerationContext *set = (GenerationContext *) context;
    GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
    GenerationPointer newPointer;
    Size        oldsize;

    /* Allow access to private part of chunk header. */
    VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);

    oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
    /* Test for someone scribbling on unused space in chunk */
    if (chunk->requested_size < oldsize)
        if (!sentinel_ok(pointer, chunk->requested_size))
            elog(WARNING, "detected write past chunk end in %s %p",
                 ((MemoryContext) set)->name, chunk);
#endif

    /*
     * Maybe the allocated area already is >= the new size. (In particular,
     * we always fall out here if the requested size is a decrease.)
     *
     * This memory context does not use power-of-2 chunk sizing and instead
     * carves the chunks to be as small as possible, so most repalloc() calls
     * will end up in the palloc/memcpy/pfree branch.
     *
     * XXX Perhaps we should annotate this condition with unlikely()?
     */
    if (oldsize >= size)
    {
#ifdef MEMORY_CONTEXT_CHECKING
        Size        oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
        /* We can only fill the extra space if we know the prior request */
        if (size > oldrequest)
            randomize_mem((char *) pointer + oldrequest,
                          size - oldrequest);
#endif

        chunk->requested_size = size;

        /*
         * If this is an increase, mark any newly-available part UNDEFINED.
         * Otherwise, mark the obsolete part NOACCESS.
         */
        if (size > oldrequest)
            VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
                                        size - oldrequest);
        else
            VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
                                       oldsize - size);

        /* set mark to catch clobber of "unused" space */
        if (size < oldsize)
            set_sentinel(pointer, size);
#else                           /* !MEMORY_CONTEXT_CHECKING */

        /*
         * We don't have the information to determine whether we're growing
         * the old request or shrinking it, so we conservatively mark the
         * entire new allocation DEFINED.
         */
        VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
        VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

        /* Disallow external access to private part of chunk header. */
        VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);

        return pointer;
    }

    /* allocate new chunk */
    newPointer = GenerationAlloc((MemoryContext) set, size);

    /* leave immediately if request was not completed */
    if (newPointer == NULL)
    {
        /* Disallow external access to private part of chunk header. */
        VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
        return NULL;
    }

    /*
     * GenerationAlloc() may have returned a region that is still NOACCESS.
     * Change it to UNDEFINED for the moment; memcpy() will then transfer
     * definedness from the old allocation to the new. If we know the old
     * allocation, copy just that much. Otherwise, make the entire old chunk
     * defined to avoid errors as we copy the currently-NOACCESS trailing
     * bytes.
     */
    VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
    oldsize = chunk->requested_size;
#else
    VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

    /* transfer existing data (certain to fit) */
    memcpy(newPointer, pointer, oldsize);

    /* free old chunk */
    GenerationFree((MemoryContext) set, pointer);

    return newPointer;
}
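
/*
 * Hedged caller-side sketch of the effect of the logic above (the sizes are
 * arbitrary examples, assuming 8-byte MAXALIGN):
 *
 *     char *p = palloc(100);     chunk->size = MAXALIGN(100) = 104
 *     p = repalloc(p, 104);      still fits in the old chunk, p is unchanged
 *     p = repalloc(p, 200);      larger: a new chunk is allocated, the data
 *                                copied over, and the old chunk merely
 *                                counted as freed in its block
 *
 * Because freed chunks are never reused, repeated growth of the same
 * allocation keeps consuming fresh space until every chunk in the older
 * blocks has been freed and the blocks themselves are released.
 */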

/*
 * GenerationGetChunkSpace
 *    Given a currently-allocated chunk, determine the total space
 *    it occupies (including all memory-allocation overhead).
 */
static Size
GenerationGetChunkSpace(MemoryContext context, void *pointer)
{
    GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
    Size        result;

    VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
    result = chunk->size + Generation_CHUNKHDRSZ;
    VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
    return result;
}

/*
 * GenerationIsEmpty
 *    Is a GenerationContext empty of any allocated space?
 */
static bool
GenerationIsEmpty(MemoryContext context)
{
    GenerationContext *set = (GenerationContext *) context;

    return dlist_is_empty(&set->blocks);
}

/*
 * GenerationStats
 *    Compute stats about memory consumption of a Generation context.
 *
 * printfunc: if not NULL, pass a human-readable stats string to this.
 * passthru: pass this pointer through to printfunc.
 * totals: if not NULL, add stats about this context into *totals.
 *
 * XXX freespace only accounts for empty space at the end of the block, not
 * space of freed chunks (which is unknown).
 */
static void
GenerationStats(MemoryContext context,
                MemoryStatsPrintFunc printfunc, void *passthru,
                MemoryContextCounters *totals)
{
    GenerationContext *set = (GenerationContext *) context;
    Size        nblocks = 0;
    Size        nchunks = 0;
    Size        nfreechunks = 0;
    Size        totalspace;
    Size        freespace = 0;
    dlist_iter  iter;

    /* Include context header in totalspace */
    totalspace = MAXALIGN(sizeof(GenerationContext));

    dlist_foreach(iter, &set->blocks)
    {
        GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);

        nblocks++;
        nchunks += block->nchunks;
        nfreechunks += block->nfree;
        totalspace += block->blksize;
        freespace += (block->endptr - block->freeptr);
    }

    if (printfunc)
    {
        char        stats_string[200];

        snprintf(stats_string, sizeof(stats_string),
                 "%zu total in %zd blocks (%zd chunks); %zu free (%zd chunks); %zu used",
                 totalspace, nblocks, nchunks, freespace,
                 nfreechunks, totalspace - freespace);
        printfunc(context, passthru, stats_string);
    }

    if (totals)
    {
        totals->nblocks += nblocks;
        totals->freechunks += nfreechunks;
        totals->totalspace += totalspace;
        totals->freespace += freespace;
    }
}
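
/*
 * Example of the stats line produced above, with made-up numbers purely for
 * illustration (two 8 kB blocks, 100 chunks of which 40 were pfree'd, and
 * 1 kB of yet-unused space at the end of the current block):
 *
 *     16448 total in 2 blocks (100 chunks); 1024 free (40 chunks); 15424 used
 *
 * Note the caveat above: "free" counts only the unused tail of each block,
 * so space occupied by the 40 freed-but-not-reused chunks is still reported
 * as "used".
 */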


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * GenerationCheck
 *    Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
GenerationCheck(MemoryContext context)
{
    GenerationContext *gen = (GenerationContext *) context;
    const char *name = context->name;
    dlist_iter  iter;

    /* walk all blocks in this context */
    dlist_foreach(iter, &gen->blocks)
    {
        GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
        int         nfree,
                    nchunks;
        char       *ptr;

        /*
         * nfree > nchunks is surely wrong, and we don't expect to see
         * equality either, because such a block should have gotten freed.
         */
        if (block->nfree >= block->nchunks)
            elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
                 name, block->nfree, block, block->nchunks);

        /* Now walk through the chunks and count them. */
        nfree = 0;
        nchunks = 0;
        ptr = ((char *) block) + Generation_BLOCKHDRSZ;

        while (ptr < block->freeptr)
        {
            GenerationChunk *chunk = (GenerationChunk *) ptr;

            /* Allow access to private part of chunk header. */
            VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);

            /* move to the next chunk */
            ptr += (chunk->size + Generation_CHUNKHDRSZ);

            nchunks += 1;

            /* chunks have both block and context pointers, so check both */
            if (chunk->block != block)
                elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
                     name, block, chunk);

            /*
             * Check for valid context pointer. Note this is an incomplete
             * test, since palloc(0) produces an allocated chunk with
             * requested_size == 0.
             */
            if ((chunk->requested_size > 0 && chunk->context != gen) ||
                (chunk->context != gen && chunk->context != NULL))
                elog(WARNING, "problem in Generation %s: bogus context link in block %p, chunk %p",
                     name, block, chunk);

            /* now make sure the chunk size is correct */
            if (chunk->size < chunk->requested_size ||
                chunk->size != MAXALIGN(chunk->size))
                elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
                     name, block, chunk);

            /* is chunk allocated? */
            if (chunk->context != NULL)
            {
                /* check sentinel, but only in allocated chunks */
                if (chunk->requested_size < chunk->size &&
                    !sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
                    elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
                         name, block, chunk);
            }
            else
                nfree += 1;

            /*
             * If chunk is allocated, disallow external access to private part
             * of chunk header.
             */
            if (chunk->context != NULL)
                VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
        }

        /*
         * Make sure we got the expected number of allocated and free chunks
         * (as tracked in the block header).
         */
        if (nchunks != block->nchunks)
            elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
                 name, nchunks, block, block->nchunks);

        if (nfree != block->nfree)
            elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
                 name, nfree, block, block->nfree);
    }
}

#endif                          /* MEMORY_CONTEXT_CHECKING */