1/*-------------------------------------------------------------------------
2 *
3 * ginvacuum.c
4 * delete & vacuum routines for the postgres GIN
5 *
6 *
7 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
9 *
10 * IDENTIFICATION
11 * src/backend/access/gin/ginvacuum.c
12 *-------------------------------------------------------------------------
13 */
14
15#include "postgres.h"
16
17#include "access/gin_private.h"
18#include "access/ginxlog.h"
19#include "access/xloginsert.h"
20#include "commands/vacuum.h"
21#include "miscadmin.h"
22#include "postmaster/autovacuum.h"
23#include "storage/indexfsm.h"
24#include "storage/lmgr.h"
25#include "storage/predicate.h"
26#include "utils/memutils.h"
27
/* Working state threaded through a single GIN bulk-delete (vacuum) pass. */
struct GinVacuumState
{
	Relation	index;			/* the GIN index being vacuumed */
	IndexBulkDeleteResult *result;	/* stats accumulator (tuples_removed,
									 * num_index_tuples, pages_deleted, ...) */
	IndexBulkDeleteCallback callback;	/* reports whether a heap TID is dead */
	void	   *callback_state; /* opaque state passed to 'callback' */
	GinState	ginstate;		/* cached index metadata (see initGinState) */
	BufferAccessStrategy strategy;	/* buffer access strategy for page reads */
	MemoryContext tmpCxt;		/* short-lived context, reset per leaf page */
};
38
39/*
40 * Vacuums an uncompressed posting list. The size of the must can be specified
41 * in number of items (nitems).
42 *
43 * If none of the items need to be removed, returns NULL. Otherwise returns
44 * a new palloc'd array with the remaining items. The number of remaining
45 * items is returned in *nremaining.
46 */
47ItemPointer
48ginVacuumItemPointers(GinVacuumState *gvs, ItemPointerData *items,
49 int nitem, int *nremaining)
50{
51 int i,
52 remaining = 0;
53 ItemPointer tmpitems = NULL;
54
55 /*
56 * Iterate over TIDs array
57 */
58 for (i = 0; i < nitem; i++)
59 {
60 if (gvs->callback(items + i, gvs->callback_state))
61 {
62 gvs->result->tuples_removed += 1;
63 if (!tmpitems)
64 {
65 /*
66 * First TID to be deleted: allocate memory to hold the
67 * remaining items.
68 */
69 tmpitems = palloc(sizeof(ItemPointerData) * nitem);
70 memcpy(tmpitems, items, sizeof(ItemPointerData) * i);
71 }
72 }
73 else
74 {
75 gvs->result->num_index_tuples += 1;
76 if (tmpitems)
77 tmpitems[remaining] = items[i];
78 remaining++;
79 }
80 }
81
82 *nremaining = remaining;
83 return tmpitems;
84}
85
86/*
87 * Create a WAL record for vacuuming entry tree leaf page.
88 */
89static void
90xlogVacuumPage(Relation index, Buffer buffer)
91{
92 Page page = BufferGetPage(buffer);
93 XLogRecPtr recptr;
94
95 /* This is only used for entry tree leaf pages. */
96 Assert(!GinPageIsData(page));
97 Assert(GinPageIsLeaf(page));
98
99 if (!RelationNeedsWAL(index))
100 return;
101
102 /*
103 * Always create a full image, we don't track the changes on the page at
104 * any more fine-grained level. This could obviously be improved...
105 */
106 XLogBeginInsert();
107 XLogRegisterBuffer(0, buffer, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
108
109 recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_VACUUM_PAGE);
110 PageSetLSN(page, recptr);
111}
112
113
114typedef struct DataPageDeleteStack
115{
116 struct DataPageDeleteStack *child;
117 struct DataPageDeleteStack *parent;
118
119 BlockNumber blkno; /* current block number */
120 BlockNumber leftBlkno; /* rightest non-deleted page on left */
121 bool isRoot;
122} DataPageDeleteStack;
123
124
125/*
126 * Delete a posting tree page.
127 */
128static void
129ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
130 BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
131{
132 Buffer dBuffer;
133 Buffer lBuffer;
134 Buffer pBuffer;
135 Page page,
136 parentPage;
137 BlockNumber rightlink;
138
139 /*
140 * This function MUST be called only if someone of parent pages hold
141 * exclusive cleanup lock. This guarantees that no insertions currently
142 * happen in this subtree. Caller also acquire Exclusive lock on deletable
143 * page and is acquiring and releasing exclusive lock on left page before.
144 * Left page was locked and released. Then parent and this page are
145 * locked. We acquire left page lock here only to mark page dirty after
146 * changing right pointer.
147 */
148 lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
149 RBM_NORMAL, gvs->strategy);
150 dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno,
151 RBM_NORMAL, gvs->strategy);
152 pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno,
153 RBM_NORMAL, gvs->strategy);
154
155 LockBuffer(lBuffer, GIN_EXCLUSIVE);
156
157 page = BufferGetPage(dBuffer);
158 rightlink = GinPageGetOpaque(page)->rightlink;
159
160 /* For deleted page remember last xid which could knew its address */
161 GinPageSetDeleteXid(page, ReadNewTransactionId());
162
163 /*
164 * Any insert which would have gone on the leaf block will now go to its
165 * right sibling.
166 */
167 PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink);
168
169 START_CRIT_SECTION();
170
171 /* Unlink the page by changing left sibling's rightlink */
172 page = BufferGetPage(lBuffer);
173 GinPageGetOpaque(page)->rightlink = rightlink;
174
175 /* Delete downlink from parent */
176 parentPage = BufferGetPage(pBuffer);
177#ifdef USE_ASSERT_CHECKING
178 do
179 {
180 PostingItem *tod = GinDataPageGetPostingItem(parentPage, myoff);
181
182 Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
183 } while (0);
184#endif
185 GinPageDeletePostingItem(parentPage, myoff);
186
187 page = BufferGetPage(dBuffer);
188
189 /*
190 * we shouldn't change rightlink field to save workability of running
191 * search scan
192 */
193 GinPageGetOpaque(page)->flags = GIN_DELETED;
194
195 MarkBufferDirty(pBuffer);
196 MarkBufferDirty(lBuffer);
197 MarkBufferDirty(dBuffer);
198
199 if (RelationNeedsWAL(gvs->index))
200 {
201 XLogRecPtr recptr;
202 ginxlogDeletePage data;
203
204 /*
205 * We can't pass REGBUF_STANDARD for the deleted page, because we
206 * didn't set pd_lower on pre-9.4 versions. The page might've been
207 * binary-upgraded from an older version, and hence not have pd_lower
208 * set correctly. Ditto for the left page, but removing the item from
209 * the parent updated its pd_lower, so we know that's OK at this
210 * point.
211 */
212 XLogBeginInsert();
213 XLogRegisterBuffer(0, dBuffer, 0);
214 XLogRegisterBuffer(1, pBuffer, REGBUF_STANDARD);
215 XLogRegisterBuffer(2, lBuffer, 0);
216
217 data.parentOffset = myoff;
218 data.rightLink = GinPageGetOpaque(page)->rightlink;
219 data.deleteXid = GinPageGetDeleteXid(page);
220
221 XLogRegisterData((char *) &data, sizeof(ginxlogDeletePage));
222
223 recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_PAGE);
224 PageSetLSN(page, recptr);
225 PageSetLSN(parentPage, recptr);
226 PageSetLSN(BufferGetPage(lBuffer), recptr);
227 }
228
229 ReleaseBuffer(pBuffer);
230 UnlockReleaseBuffer(lBuffer);
231 ReleaseBuffer(dBuffer);
232
233 END_CRIT_SECTION();
234
235 gvs->result->pages_deleted++;
236}
237
238
/*
 * scans posting tree and deletes empty pages
 *
 * Recursively descends the posting tree from blkno, reusing one
 * DataPageDeleteStack entry per tree level (allocated on first visit to
 * that level).  Returns true if the page at blkno was deleted.
 */
static bool
ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
				DataPageDeleteStack *parent, OffsetNumber myoff)
{
	DataPageDeleteStack *me;
	Buffer		buffer;
	Page		page;
	bool		meDelete = false;
	bool		isempty;

	if (isRoot)
	{
		me = parent;
	}
	else
	{
		/* Reuse (or create) the stack entry for this tree level. */
		if (!parent->child)
		{
			me = (DataPageDeleteStack *) palloc0(sizeof(DataPageDeleteStack));
			me->parent = parent;
			parent->child = me;
			me->leftBlkno = InvalidBlockNumber;
		}
		else
			me = parent->child;
	}

	buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
								RBM_NORMAL, gvs->strategy);

	/* The root is already locked (for cleanup) by our caller. */
	if (!isRoot)
		LockBuffer(buffer, GIN_EXCLUSIVE);

	page = BufferGetPage(buffer);

	Assert(GinPageIsData(page));

	if (!GinPageIsLeaf(page))
	{
		OffsetNumber i;

		me->blkno = blkno;
		for (i = FirstOffsetNumber; i <= GinPageGetOpaque(page)->maxoff; i++)
		{
			PostingItem *pitem = GinDataPageGetPostingItem(page, i);

			/*
			 * If the child at offset i was deleted, the following posting
			 * item has shifted into offset i, so revisit the same offset.
			 */
			if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), false, me, i))
				i--;
		}
	}

	/* An internal page is empty once all of its downlinks are gone. */
	if (GinPageIsLeaf(page))
		isempty = GinDataLeafPageIsEmpty(page);
	else
		isempty = GinPageGetOpaque(page)->maxoff < FirstOffsetNumber;

	if (isempty)
	{
		/* we never delete the left- or rightmost branch */
		if (me->leftBlkno != InvalidBlockNumber && !GinPageRightMost(page))
		{
			Assert(!isRoot);
			ginDeletePage(gvs, blkno, me->leftBlkno, me->parent->blkno, myoff, me->parent->isRoot);
			meDelete = true;
		}
	}

	if (!isRoot)
		LockBuffer(buffer, GIN_UNLOCK);

	ReleaseBuffer(buffer);

	/* Remember the rightmost non-deleted page seen at this level. */
	if (!meDelete)
		me->leftBlkno = blkno;

	return meDelete;
}
319
320
/*
 * Scan through posting tree leafs, delete empty tuples. Returns true if there
 * is at least one empty page.
 */
static bool
ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno)
{
	Buffer		buffer;
	Page		page;
	bool		hasVoidPage = false;
	MemoryContext oldCxt;

	/* Find leftmost leaf page of posting tree and lock it in exclusive mode */
	while (true)
	{
		PostingItem *pitem;

		buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
									RBM_NORMAL, gvs->strategy);
		LockBuffer(buffer, GIN_SHARE);
		page = BufferGetPage(buffer);

		Assert(GinPageIsData(page));

		if (GinPageIsLeaf(page))
		{
			/* Trade the share lock for an exclusive one before vacuuming. */
			LockBuffer(buffer, GIN_UNLOCK);
			LockBuffer(buffer, GIN_EXCLUSIVE);
			break;
		}

		Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);

		/* Descend through the leftmost downlink. */
		pitem = GinDataPageGetPostingItem(page, FirstOffsetNumber);
		blkno = PostingItemGetBlockNumber(pitem);
		Assert(blkno != InvalidBlockNumber);

		UnlockReleaseBuffer(buffer);
	}

	/* Iterate all posting tree leaves using rightlinks and vacuum them */
	while (true)
	{
		/* Do the per-leaf work in the short-lived context, then reset it. */
		oldCxt = MemoryContextSwitchTo(gvs->tmpCxt);
		ginVacuumPostingTreeLeaf(gvs->index, buffer, gvs);
		MemoryContextSwitchTo(oldCxt);
		MemoryContextReset(gvs->tmpCxt);

		if (GinDataLeafPageIsEmpty(page))
			hasVoidPage = true;

		/* Grab the rightlink before dropping the lock and pin. */
		blkno = GinPageGetOpaque(page)->rightlink;

		UnlockReleaseBuffer(buffer);

		if (blkno == InvalidBlockNumber)
			break;

		buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
									RBM_NORMAL, gvs->strategy);
		LockBuffer(buffer, GIN_EXCLUSIVE);
		page = BufferGetPage(buffer);
	}

	return hasVoidPage;
}
387
388static void
389ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
390{
391 if (ginVacuumPostingTreeLeaves(gvs, rootBlkno))
392 {
393 /*
394 * There is at least one empty page. So we have to rescan the tree
395 * deleting empty pages.
396 */
397 Buffer buffer;
398 DataPageDeleteStack root,
399 *ptr,
400 *tmp;
401
402 buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
403 RBM_NORMAL, gvs->strategy);
404
405 /*
406 * Lock posting tree root for cleanup to ensure there are no
407 * concurrent inserts.
408 */
409 LockBufferForCleanup(buffer);
410
411 memset(&root, 0, sizeof(DataPageDeleteStack));
412 root.leftBlkno = InvalidBlockNumber;
413 root.isRoot = true;
414
415 ginScanToDelete(gvs, rootBlkno, true, &root, InvalidOffsetNumber);
416
417 ptr = root.child;
418
419 while (ptr)
420 {
421 tmp = ptr->child;
422 pfree(ptr);
423 ptr = tmp;
424 }
425
426 UnlockReleaseBuffer(buffer);
427 }
428}
429
/*
 * returns modified page or NULL if page isn't modified.
 * Function works with original page until first change is occurred,
 * then page is copied into temporary one.
 *
 * Posting tree roots found on the page are stored into roots[] (count in
 * *nroot) for later processing by the caller.
 */
static Page
ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint32 *nroot)
{
	Page		origpage = BufferGetPage(buffer),
				tmppage;
	OffsetNumber i,
				maxoff = PageGetMaxOffsetNumber(origpage);

	/* Until the first modification, tmppage aliases the original page. */
	tmppage = origpage;

	*nroot = 0;

	for (i = FirstOffsetNumber; i <= maxoff; i++)
	{
		IndexTuple	itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));

		if (GinIsPostingTree(itup))
		{
			/*
			 * store posting tree's roots for further processing, we can't
			 * vacuum it just now due to risk of deadlocks with scans/inserts
			 */
			roots[*nroot] = GinGetDownlink(itup);
			(*nroot)++;
		}
		else if (GinGetNPosting(itup) > 0)
		{
			int			nitems;
			ItemPointer items_orig;
			bool		free_items_orig;
			ItemPointer items;

			/* Get list of item pointers from the tuple. */
			if (GinItupIsCompressed(itup))
			{
				/* Compressed posting list must be decoded before vacuuming. */
				items_orig = ginPostingListDecode((GinPostingList *) GinGetPosting(itup), &nitems);
				free_items_orig = true;
			}
			else
			{
				/* Uncompressed: vacuum the stored array directly. */
				items_orig = (ItemPointer) GinGetPosting(itup);
				nitems = GinGetNPosting(itup);
				free_items_orig = false;
			}

			/* Remove any items from the list that need to be vacuumed. */
			items = ginVacuumItemPointers(gvs, items_orig, nitems, &nitems);

			if (free_items_orig)
				pfree(items_orig);

			/* If any item pointers were removed, recreate the tuple. */
			if (items)
			{
				OffsetNumber attnum;
				Datum		key;
				GinNullCategory category;
				GinPostingList *plist;
				int			plistsize;

				/* Re-compress the surviving items, if any remain. */
				if (nitems > 0)
				{
					plist = ginCompressPostingList(items, nitems, GinMaxItemSize, NULL);
					plistsize = SizeOfGinPostingList(plist);
				}
				else
				{
					plist = NULL;
					plistsize = 0;
				}

				/*
				 * if we already created a temporary page, make changes in
				 * place
				 */
				if (tmppage == origpage)
				{
					/*
					 * On first difference, create a temporary copy of the
					 * page and copy the tuple's posting list to it.
					 */
					tmppage = PageGetTempPageCopy(origpage);

					/* set itup pointer to new page */
					itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
				}

				/* Rebuild the tuple with the same key but new posting list. */
				attnum = gintuple_get_attrnum(&gvs->ginstate, itup);
				key = gintuple_get_key(&gvs->ginstate, itup, &category);
				itup = GinFormTuple(&gvs->ginstate, attnum, key, category,
									(char *) plist, plistsize,
									nitems, true);
				if (plist)
					pfree(plist);

				/* Replace the old tuple at the same offset on the temp page. */
				PageIndexTupleDelete(tmppage, i);

				if (PageAddItem(tmppage, (Item) itup, IndexTupleSize(itup), i, false, false) != i)
					elog(ERROR, "failed to add item to index page in \"%s\"",
						 RelationGetRelationName(gvs->index));

				pfree(itup);
				pfree(items);
			}
		}
	}

	/* NULL signals "page unchanged" to the caller. */
	return (tmppage == origpage) ? NULL : tmppage;
}
543
544IndexBulkDeleteResult *
545ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
546 IndexBulkDeleteCallback callback, void *callback_state)
547{
548 Relation index = info->index;
549 BlockNumber blkno = GIN_ROOT_BLKNO;
550 GinVacuumState gvs;
551 Buffer buffer;
552 BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];
553 uint32 nRoot;
554
555 gvs.tmpCxt = AllocSetContextCreate(CurrentMemoryContext,
556 "Gin vacuum temporary context",
557 ALLOCSET_DEFAULT_SIZES);
558 gvs.index = index;
559 gvs.callback = callback;
560 gvs.callback_state = callback_state;
561 gvs.strategy = info->strategy;
562 initGinState(&gvs.ginstate, index);
563
564 /* first time through? */
565 if (stats == NULL)
566 {
567 /* Yes, so initialize stats to zeroes */
568 stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
569
570 /*
571 * and cleanup any pending inserts
572 */
573 ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
574 false, true, stats);
575 }
576
577 /* we'll re-count the tuples each time */
578 stats->num_index_tuples = 0;
579 gvs.result = stats;
580
581 buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
582 RBM_NORMAL, info->strategy);
583
584 /* find leaf page */
585 for (;;)
586 {
587 Page page = BufferGetPage(buffer);
588 IndexTuple itup;
589
590 LockBuffer(buffer, GIN_SHARE);
591
592 Assert(!GinPageIsData(page));
593
594 if (GinPageIsLeaf(page))
595 {
596 LockBuffer(buffer, GIN_UNLOCK);
597 LockBuffer(buffer, GIN_EXCLUSIVE);
598
599 if (blkno == GIN_ROOT_BLKNO && !GinPageIsLeaf(page))
600 {
601 LockBuffer(buffer, GIN_UNLOCK);
602 continue; /* check it one more */
603 }
604 break;
605 }
606
607 Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);
608
609 itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));
610 blkno = GinGetDownlink(itup);
611 Assert(blkno != InvalidBlockNumber);
612
613 UnlockReleaseBuffer(buffer);
614 buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
615 RBM_NORMAL, info->strategy);
616 }
617
618 /* right now we found leftmost page in entry's BTree */
619
620 for (;;)
621 {
622 Page page = BufferGetPage(buffer);
623 Page resPage;
624 uint32 i;
625
626 Assert(!GinPageIsData(page));
627
628 resPage = ginVacuumEntryPage(&gvs, buffer, rootOfPostingTree, &nRoot);
629
630 blkno = GinPageGetOpaque(page)->rightlink;
631
632 if (resPage)
633 {
634 START_CRIT_SECTION();
635 PageRestoreTempPage(resPage, page);
636 MarkBufferDirty(buffer);
637 xlogVacuumPage(gvs.index, buffer);
638 UnlockReleaseBuffer(buffer);
639 END_CRIT_SECTION();
640 }
641 else
642 {
643 UnlockReleaseBuffer(buffer);
644 }
645
646 vacuum_delay_point();
647
648 for (i = 0; i < nRoot; i++)
649 {
650 ginVacuumPostingTree(&gvs, rootOfPostingTree[i]);
651 vacuum_delay_point();
652 }
653
654 if (blkno == InvalidBlockNumber) /* rightmost page */
655 break;
656
657 buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
658 RBM_NORMAL, info->strategy);
659 LockBuffer(buffer, GIN_EXCLUSIVE);
660 }
661
662 MemoryContextDelete(gvs.tmpCxt);
663
664 return gvs.result;
665}
666
/*
 * Post-VACUUM cleanup for a GIN index: record free pages in the FSM,
 * gather page/entry statistics for the metapage, and fill in the final
 * IndexBulkDeleteResult.
 */
IndexBulkDeleteResult *
ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
	Relation	index = info->index;
	bool		needLock;
	BlockNumber npages,
				blkno;
	BlockNumber totFreePages;
	GinState	ginstate;
	GinStatsData idxStat;

	/*
	 * In an autovacuum analyze, we want to clean up pending insertions.
	 * Otherwise, an ANALYZE-only call is a no-op.
	 */
	if (info->analyze_only)
	{
		if (IsAutoVacuumWorkerProcess())
		{
			initGinState(&ginstate, index);
			ginInsertCleanup(&ginstate, false, true, true, stats);
		}
		return stats;
	}

	/*
	 * Set up all-zero stats and cleanup pending inserts if ginbulkdelete
	 * wasn't called
	 */
	if (stats == NULL)
	{
		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
		initGinState(&ginstate, index);
		ginInsertCleanup(&ginstate, !IsAutoVacuumWorkerProcess(),
						 false, true, stats);
	}

	memset(&idxStat, 0, sizeof(idxStat));

	/*
	 * XXX we always report the heap tuple count as the number of index
	 * entries. This is bogus if the index is partial, but it's real hard to
	 * tell how many distinct heap entries are referenced by a GIN index.
	 */
	stats->num_index_tuples = info->num_heap_tuples;
	stats->estimated_count = info->estimated_count;

	/*
	 * Need lock unless it's local to this backend.
	 */
	needLock = !RELATION_IS_LOCAL(index);

	if (needLock)
		LockRelationForExtension(index, ExclusiveLock);
	npages = RelationGetNumberOfBlocks(index);
	if (needLock)
		UnlockRelationForExtension(index, ExclusiveLock);

	totFreePages = 0;

	/* Scan every page, classifying it and recording recyclable ones. */
	for (blkno = GIN_ROOT_BLKNO; blkno < npages; blkno++)
	{
		Buffer		buffer;
		Page		page;

		vacuum_delay_point();

		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
									RBM_NORMAL, info->strategy);
		LockBuffer(buffer, GIN_SHARE);
		page = (Page) BufferGetPage(buffer);

		if (GinPageIsRecyclable(page))
		{
			Assert(blkno != GIN_ROOT_BLKNO);
			RecordFreeIndexPage(index, blkno);
			totFreePages++;
		}
		else if (GinPageIsData(page))
		{
			idxStat.nDataPages++;
		}
		else if (!GinPageIsList(page))
		{
			/* An entry tree page; leaf tuples count toward nEntries. */
			idxStat.nEntryPages++;

			if (GinPageIsLeaf(page))
				idxStat.nEntries += PageGetMaxOffsetNumber(page);
		}

		UnlockReleaseBuffer(buffer);
	}

	/* Update the metapage with accurate page and entry counts */
	idxStat.nTotalPages = npages;
	ginUpdateStats(info->index, &idxStat, false);

	/* Finally, vacuum the FSM */
	IndexFreeSpaceMapVacuum(info->index);

	stats->pages_free = totFreePages;

	if (needLock)
		LockRelationForExtension(index, ExclusiveLock);
	stats->num_pages = RelationGetNumberOfBlocks(index);
	if (needLock)
		UnlockRelationForExtension(index, ExclusiveLock);

	return stats;
}
777