1 | /*------------------------------------------------------------------------- |
2 | * |
3 | * visibilitymap.c |
4 | * bitmap for tracking visibility of heap tuples |
5 | * |
6 | * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group |
7 | * Portions Copyright (c) 1994, Regents of the University of California |
8 | * |
9 | * |
10 | * IDENTIFICATION |
11 | * src/backend/access/heap/visibilitymap.c |
12 | * |
13 | * INTERFACE ROUTINES |
14 | * visibilitymap_clear - clear bits for one page in the visibility map |
15 | * visibilitymap_pin - pin a map page for setting a bit |
16 | * visibilitymap_pin_ok - check whether correct map page is already pinned |
17 | * visibilitymap_set - set a bit in a previously pinned page |
18 | * visibilitymap_get_status - get status of bits |
19 | * visibilitymap_count - count number of bits set in visibility map |
20 | * visibilitymap_truncate - truncate the visibility map |
21 | * |
22 | * NOTES |
23 | * |
24 | * The visibility map is a bitmap with two bits (all-visible and all-frozen) |
25 | * per heap page. A set all-visible bit means that all tuples on the page are |
26 | * known visible to all transactions, and therefore the page doesn't need to |
27 | * be vacuumed. A set all-frozen bit means that all tuples on the page are |
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * when a whole-table-scanning vacuum (e.g., an anti-wraparound vacuum) is
 * required.
30 | * The all-frozen bit must be set only when the page is already all-visible. |
31 | * |
32 | * The map is conservative in the sense that we make sure that whenever a bit |
33 | * is set, we know the condition is true, but if a bit is not set, it might or |
34 | * might not be true. |
35 | * |
36 | * Clearing visibility map bits is not separately WAL-logged. The callers |
37 | * must make sure that whenever a bit is cleared, the bit is cleared on WAL |
38 | * replay of the updating operation as well. |
39 | * |
 * When we *set* a visibility map bit during VACUUM, we must write WAL.  This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain.  The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit.  If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap page.
 * Otherwise, the next insert, update, or delete on the heap page will fail
 * to realize that the visibility map bit must be cleared, possibly causing
 * index-only scans to return wrong answers.
51 | * |
52 | * VACUUM will normally skip pages for which the visibility map bit is set; |
53 | * such pages can't contain any dead tuples and therefore don't need vacuuming. |
54 | * |
55 | * LOCKING |
56 | * |
57 | * In heapam.c, whenever a page is modified so that not all tuples on the |
58 | * page are visible to everyone anymore, the corresponding bit in the |
59 | * visibility map is cleared. In order to be crash-safe, we need to do this |
60 | * while still holding a lock on the heap page and in the same critical |
61 | * section that logs the page modification. However, we don't want to hold |
62 | * the buffer lock over any I/O that may be required to read in the visibility |
63 | * map page. To avoid this, we examine the heap page before locking it; |
64 | * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map |
 * page.  Then, we lock the buffer.  But this creates a race condition: there
66 | * is a possibility that in the time it takes to lock the buffer, the |
67 | * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the |
68 | * buffer, pin the visibility map page, and relock the buffer. This shouldn't |
69 | * happen often, because only VACUUM currently sets visibility map bits, |
70 | * and the race will only occur if VACUUM processes a given page at almost |
71 | * exactly the same time that someone tries to further modify it. |
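 *
 * A sketch of that dance, only illustrative and not the verbatim heapam.c
 * code (the local variables relation, buffer, block and vmbuffer are
 * assumed):
 *
 *		if (PageIsAllVisible(BufferGetPage(buffer)))
 *			visibilitymap_pin(relation, block, &vmbuffer);
 *		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		if (PageIsAllVisible(BufferGetPage(buffer)) &&
 *			!visibilitymap_pin_ok(block, vmbuffer))
 *		{
 *			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *			visibilitymap_pin(relation, block, &vmbuffer);
 *			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		}
 *
 * If the inner test fires, we hit the race described above: the page became
 * all-visible after we looked but before we got the lock, so we drop the
 * lock, pin the map page, and retake the lock (and recheck) before clearing
 * the bit.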
72 | * |
73 | * To set a bit, you need to hold a lock on the heap page. That prevents |
74 | * the race condition where VACUUM sees that all tuples on the page are |
75 | * visible to everyone, but another backend modifies the page before VACUUM |
76 | * sets the bit in the visibility map. |
77 | * |
78 | * When a bit is set, the LSN of the visibility map page is updated to make |
79 | * sure that the visibility map update doesn't get written to disk before the |
80 | * WAL record of the changes that made it possible to set the bit is flushed. |
81 | * But when a bit is cleared, we don't have to do that because it's always |
 * safe to clear a bit in the map from a correctness point of view.
83 | * |
84 | *------------------------------------------------------------------------- |
85 | */ |
86 | #include "postgres.h" |
87 | |
88 | #include "access/heapam_xlog.h" |
89 | #include "access/visibilitymap.h" |
90 | #include "access/xlog.h" |
91 | #include "miscadmin.h" |
92 | #include "port/pg_bitutils.h" |
93 | #include "storage/bufmgr.h" |
94 | #include "storage/lmgr.h" |
95 | #include "storage/smgr.h" |
96 | #include "utils/inval.h" |
97 | |
98 | |
99 | /*#define TRACE_VISIBILITYMAP */ |
100 | |
101 | /* |
 * Size of the bitmap on each visibility map page, in bytes.  There are no
103 | * extra headers, so the whole page minus the standard page header is |
104 | * used for the bitmap. |
105 | */ |
106 | #define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData)) |
107 | |
108 | /* Number of heap blocks we can represent in one byte */ |
109 | #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK) |
110 | |
111 | /* Number of heap blocks we can represent in one visibility map page. */ |
112 | #define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE) |
113 | |
114 | /* Mapping from heap block number to the right bit in the visibility map */ |
115 | #define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE) |
116 | #define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE) |
117 | #define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK) |
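
/*
 * Worked example of the mapping macros above, assuming the default 8 kB
 * BLCKSZ and 8-byte maximal alignment, which give MAPSIZE = 8168,
 * HEAPBLOCKS_PER_BYTE = 4 and HEAPBLOCKS_PER_PAGE = 32672 (other build
 * options shift the numbers, not the logic).  For heap block 40000:
 *
 *		HEAPBLK_TO_MAPBLOCK(40000) = 40000 / 32672       = 1
 *		HEAPBLK_TO_MAPBYTE(40000)  = (40000 % 32672) / 4 = 1832
 *		HEAPBLK_TO_OFFSET(40000)   = (40000 % 4) * 2     = 0
 *
 * i.e. its two bits sit at the low end of byte 1832 of the second map page.
 */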
118 | |
119 | /* Masks for counting subsets of bits in the visibility map. */ |
120 | #define VISIBLE_MASK64 UINT64CONST(0x5555555555555555) /* The lower bit of each |
121 | * bit pair */ |
122 | #define FROZEN_MASK64 UINT64CONST(0xaaaaaaaaaaaaaaaa) /* The upper bit of each |
123 | * bit pair */ |
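
/*
 * For instance (illustrative only), a map byte of 0x07 says that the first
 * two heap blocks it covers are all-visible and that the first is also
 * all-frozen: 0x07 & 0x55 = 0x05 contributes two bits to the all-visible
 * count, and 0x07 & 0xaa = 0x02 contributes one bit to the all-frozen count.
 * visibilitymap_count() below does exactly this, 64 bits at a time, via
 * pg_popcount64().
 */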
124 | |
125 | /* prototypes for internal routines */ |
126 | static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend); |
127 | static void vm_extend(Relation rel, BlockNumber nvmblocks); |
128 | |
129 | |
130 | /* |
131 | * visibilitymap_clear - clear specified bits for one page in visibility map |
132 | * |
133 | * You must pass a buffer containing the correct map page to this function. |
134 | * Call visibilitymap_pin first to pin the right one. This function doesn't do |
135 | * any I/O. Returns true if any bits have been cleared and false otherwise. |
136 | */ |
137 | bool |
138 | visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags) |
139 | { |
140 | BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); |
141 | int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); |
142 | int mapOffset = HEAPBLK_TO_OFFSET(heapBlk); |
143 | uint8 mask = flags << mapOffset; |
144 | char *map; |
145 | bool cleared = false; |
146 | |
147 | Assert(flags & VISIBILITYMAP_VALID_BITS); |
148 | |
149 | #ifdef TRACE_VISIBILITYMAP |
	elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
151 | #endif |
152 | |
153 | if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock) |
		elog(ERROR, "wrong buffer passed to visibilitymap_clear");
155 | |
156 | LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); |
157 | map = PageGetContents(BufferGetPage(buf)); |
158 | |
159 | if (map[mapByte] & mask) |
160 | { |
161 | map[mapByte] &= ~mask; |
162 | |
163 | MarkBufferDirty(buf); |
164 | cleared = true; |
165 | } |
166 | |
167 | LockBuffer(buf, BUFFER_LOCK_UNLOCK); |
168 | |
169 | return cleared; |
170 | } |
171 | |
172 | /* |
173 | * visibilitymap_pin - pin a map page for setting a bit |
174 | * |
175 | * Setting a bit in the visibility map is a two-phase operation. First, call |
176 | * visibilitymap_pin, to pin the visibility map page containing the bit for |
177 | * the heap page. Because that can require I/O to read the map page, you |
178 | * shouldn't hold a lock on the heap page while doing that. Then, call |
179 | * visibilitymap_set to actually set the bit. |
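 *
 * An illustrative caller sketch, loosely modeled on what VACUUM does (the
 * variable names are assumptions and error handling is omitted):
 *
 *		visibilitymap_pin(rel, blkno, &vmbuffer);
 *		LockBuffer(heapbuf, BUFFER_LOCK_EXCLUSIVE);
 *		PageSetAllVisible(BufferGetPage(heapbuf));
 *		MarkBufferDirty(heapbuf);
 *		visibilitymap_set(rel, blkno, heapbuf, InvalidXLogRecPtr,
 *						  vmbuffer, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
 *		LockBuffer(heapbuf, BUFFER_LOCK_UNLOCK);
 *
 * The possible I/O happens in visibilitymap_pin, before the heap page is
 * locked; after taking the lock, the caller must of course recheck that the
 * page still qualifies before setting PD_ALL_VISIBLE and the map bit.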
180 | * |
181 | * On entry, *buf should be InvalidBuffer or a valid buffer returned by |
182 | * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same |
183 | * relation. On return, *buf is a valid buffer with the map page containing |
184 | * the bit for heapBlk. |
185 | * |
186 | * If the page doesn't exist in the map file yet, it is extended. |
187 | */ |
188 | void |
189 | visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf) |
190 | { |
191 | BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); |
192 | |
193 | /* Reuse the old pinned buffer if possible */ |
194 | if (BufferIsValid(*buf)) |
195 | { |
196 | if (BufferGetBlockNumber(*buf) == mapBlock) |
197 | return; |
198 | |
199 | ReleaseBuffer(*buf); |
200 | } |
201 | *buf = vm_readbuf(rel, mapBlock, true); |
202 | } |
203 | |
204 | /* |
205 | * visibilitymap_pin_ok - do we already have the correct page pinned? |
206 | * |
207 | * On entry, buf should be InvalidBuffer or a valid buffer returned by |
208 | * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same |
209 | * relation. The return value indicates whether the buffer covers the |
210 | * given heapBlk. |
211 | */ |
212 | bool |
213 | visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf) |
214 | { |
215 | BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); |
216 | |
217 | return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock; |
218 | } |
219 | |
220 | /* |
221 | * visibilitymap_set - set bit(s) on a previously pinned page |
222 | * |
223 | * recptr is the LSN of the XLOG record we're replaying, if we're in recovery, |
224 | * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the |
225 | * one provided; in normal running, we generate a new XLOG record and set the |
226 | * page LSN to that value. cutoff_xid is the largest xmin on the page being |
227 | * marked all-visible; it is needed for Hot Standby, and can be |
228 | * InvalidTransactionId if the page contains no tuples. It can also be set |
229 | * to InvalidTransactionId when a page that is already all-visible is being |
230 | * marked all-frozen. |
231 | * |
232 | * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling |
233 | * this function. Except in recovery, caller should also pass the heap |
234 | * buffer. When checksums are enabled and we're not in recovery, we must add |
235 | * the heap buffer to the WAL chain to protect it from being torn. |
236 | * |
237 | * You must pass a buffer containing the correct map page to this function. |
238 | * Call visibilitymap_pin first to pin the right one. This function doesn't do |
239 | * any I/O. |
240 | */ |
241 | void |
242 | visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, |
243 | XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, |
244 | uint8 flags) |
245 | { |
246 | BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); |
247 | uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); |
248 | uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); |
249 | Page page; |
250 | uint8 *map; |
251 | |
252 | #ifdef TRACE_VISIBILITYMAP |
	elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
254 | #endif |
255 | |
256 | Assert(InRecovery || XLogRecPtrIsInvalid(recptr)); |
257 | Assert(InRecovery || BufferIsValid(heapBuf)); |
258 | Assert(flags & VISIBILITYMAP_VALID_BITS); |
259 | |
260 | /* Check that we have the right heap page pinned, if present */ |
261 | if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk) |
		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
263 | |
264 | /* Check that we have the right VM page pinned */ |
265 | if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock) |
		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
267 | |
268 | page = BufferGetPage(vmBuf); |
269 | map = (uint8 *) PageGetContents(page); |
270 | LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE); |
271 | |
272 | if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS)) |
273 | { |
274 | START_CRIT_SECTION(); |
275 | |
276 | map[mapByte] |= (flags << mapOffset); |
277 | MarkBufferDirty(vmBuf); |
278 | |
279 | if (RelationNeedsWAL(rel)) |
280 | { |
281 | if (XLogRecPtrIsInvalid(recptr)) |
282 | { |
283 | Assert(!InRecovery); |
284 | recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf, |
285 | cutoff_xid, flags); |
286 | |
287 | /* |
288 | * If data checksums are enabled (or wal_log_hints=on), we |
289 | * need to protect the heap page from being torn. |
290 | */ |
291 | if (XLogHintBitIsNeeded()) |
292 | { |
293 | Page heapPage = BufferGetPage(heapBuf); |
294 | |
295 | /* caller is expected to set PD_ALL_VISIBLE first */ |
296 | Assert(PageIsAllVisible(heapPage)); |
297 | PageSetLSN(heapPage, recptr); |
298 | } |
299 | } |
300 | PageSetLSN(page, recptr); |
301 | } |
302 | |
303 | END_CRIT_SECTION(); |
304 | } |
305 | |
306 | LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK); |
307 | } |
308 | |
309 | /* |
310 | * visibilitymap_get_status - get status of bits |
311 | * |
 * Are all tuples on heapBlk visible to all transactions, and/or all frozen,
 * according to the visibility map?
314 | * |
315 | * On entry, *buf should be InvalidBuffer or a valid buffer returned by an |
316 | * earlier call to visibilitymap_pin or visibilitymap_get_status on the same |
317 | * relation. On return, *buf is a valid buffer with the map page containing |
318 | * the bit for heapBlk, or InvalidBuffer. The caller is responsible for |
319 | * releasing *buf after it's done testing and setting bits. |
320 | * |
321 | * NOTE: This function is typically called without a lock on the heap page, |
322 | * so somebody else could change the bit just after we look at it. In fact, |
323 | * since we don't lock the visibility map page either, it's even possible that |
324 | * someone else could have changed the bit just before we look at it, but yet |
325 | * we might see the old value. It is the caller's responsibility to deal with |
326 | * all concurrency issues! |
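 *
 * Illustrative use (the VM_ALL_VISIBLE and VM_ALL_FROZEN macros in
 * visibilitymap.h wrap this function in essentially this way):
 *
 *		uint8	status = visibilitymap_get_status(rel, blkno, &vmbuffer);
 *		bool	all_visible = (status & VISIBILITYMAP_ALL_VISIBLE) != 0;
 *
 *		if (BufferIsValid(vmbuffer))
 *			ReleaseBuffer(vmbuffer);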
327 | */ |
328 | uint8 |
329 | visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf) |
330 | { |
331 | BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); |
332 | uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); |
333 | uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); |
334 | char *map; |
335 | uint8 result; |
336 | |
337 | #ifdef TRACE_VISIBILITYMAP |
	elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
339 | #endif |
340 | |
341 | /* Reuse the old pinned buffer if possible */ |
342 | if (BufferIsValid(*buf)) |
343 | { |
344 | if (BufferGetBlockNumber(*buf) != mapBlock) |
345 | { |
346 | ReleaseBuffer(*buf); |
347 | *buf = InvalidBuffer; |
348 | } |
349 | } |
350 | |
351 | if (!BufferIsValid(*buf)) |
352 | { |
353 | *buf = vm_readbuf(rel, mapBlock, false); |
354 | if (!BufferIsValid(*buf)) |
355 | return false; |
356 | } |
357 | |
358 | map = PageGetContents(BufferGetPage(*buf)); |
359 | |
360 | /* |
361 | * A single byte read is atomic. There could be memory-ordering effects |
362 | * here, but for performance reasons we make it the caller's job to worry |
363 | * about that. |
364 | */ |
365 | result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS); |
366 | return result; |
367 | } |
368 | |
369 | /* |
370 | * visibilitymap_count - count number of bits set in visibility map |
371 | * |
372 | * Note: we ignore the possibility of race conditions when the table is being |
373 | * extended concurrently with the call. New pages added to the table aren't |
374 | * going to be marked all-visible or all-frozen, so they won't affect the result. |
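 *
 * all_visible must always be supplied; all_frozen may be NULL when the
 * caller has no use for the all-frozen count, e.g. (illustrative only):
 *
 *		BlockNumber n_all_visible;
 *
 *		visibilitymap_count(rel, &n_all_visible, NULL);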
375 | */ |
376 | void |
377 | visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen) |
378 | { |
379 | BlockNumber mapBlock; |
380 | BlockNumber nvisible = 0; |
381 | BlockNumber nfrozen = 0; |
382 | |
383 | /* all_visible must be specified */ |
384 | Assert(all_visible); |
385 | |
386 | for (mapBlock = 0;; mapBlock++) |
387 | { |
388 | Buffer mapBuffer; |
389 | uint64 *map; |
390 | int i; |
391 | |
392 | /* |
393 | * Read till we fall off the end of the map. We assume that any extra |
394 | * bytes in the last page are zeroed, so we don't bother excluding |
395 | * them from the count. |
396 | */ |
397 | mapBuffer = vm_readbuf(rel, mapBlock, false); |
398 | if (!BufferIsValid(mapBuffer)) |
399 | break; |
400 | |
401 | /* |
402 | * We choose not to lock the page, since the result is going to be |
403 | * immediately stale anyway if anyone is concurrently setting or |
404 | * clearing bits, and we only really need an approximate value. |
405 | */ |
406 | map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer)); |
407 | |
408 | StaticAssertStmt(MAPSIZE % sizeof(uint64) == 0, |
409 | "unsupported MAPSIZE" ); |
410 | if (all_frozen == NULL) |
411 | { |
412 | for (i = 0; i < MAPSIZE / sizeof(uint64); i++) |
413 | nvisible += pg_popcount64(map[i] & VISIBLE_MASK64); |
414 | } |
415 | else |
416 | { |
417 | for (i = 0; i < MAPSIZE / sizeof(uint64); i++) |
418 | { |
419 | nvisible += pg_popcount64(map[i] & VISIBLE_MASK64); |
420 | nfrozen += pg_popcount64(map[i] & FROZEN_MASK64); |
421 | } |
422 | } |
423 | |
424 | ReleaseBuffer(mapBuffer); |
425 | } |
426 | |
427 | *all_visible = nvisible; |
428 | if (all_frozen) |
429 | *all_frozen = nfrozen; |
430 | } |
431 | |
432 | /* |
433 | * visibilitymap_truncate - truncate the visibility map |
434 | * |
435 | * The caller must hold AccessExclusiveLock on the relation, to ensure that |
436 | * other backends receive the smgr invalidation event that this function sends |
437 | * before they access the VM again. |
438 | * |
439 | * nheapblocks is the new size of the heap. |
440 | */ |
441 | void |
442 | visibilitymap_truncate(Relation rel, BlockNumber nheapblocks) |
443 | { |
444 | BlockNumber newnblocks; |
445 | |
446 | /* last remaining block, byte, and bit */ |
447 | BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks); |
448 | uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks); |
449 | uint8 truncOffset = HEAPBLK_TO_OFFSET(nheapblocks); |
450 | |
451 | #ifdef TRACE_VISIBILITYMAP |
	elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
453 | #endif |
454 | |
455 | RelationOpenSmgr(rel); |
456 | |
457 | /* |
458 | * If no visibility map has been created yet for this relation, there's |
459 | * nothing to truncate. |
460 | */ |
461 | if (!smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM)) |
462 | return; |
463 | |
464 | /* |
465 | * Unless the new size is exactly at a visibility map page boundary, the |
466 | * tail bits in the last remaining map page, representing truncated heap |
467 | * blocks, need to be cleared. This is not only tidy, but also necessary |
468 | * because we don't get a chance to clear the bits if the heap is extended |
469 | * again. |
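	 *
	 * For example (with the default 8 kB BLCKSZ, four heap blocks per map
	 * byte), truncating the heap to nheapblocks = 10 gives truncBlock = 0,
	 * truncByte = 2 and truncOffset = 4: we keep map page 0, zero bytes 3
	 * onwards, and mask byte 2 with (1 << 4) - 1 = 0x0f, so the bits for
	 * heap blocks 8 and 9 survive while those for blocks 10 and 11 are
	 * cleared.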
470 | */ |
471 | if (truncByte != 0 || truncOffset != 0) |
472 | { |
473 | Buffer mapBuffer; |
474 | Page page; |
475 | char *map; |
476 | |
477 | newnblocks = truncBlock + 1; |
478 | |
479 | mapBuffer = vm_readbuf(rel, truncBlock, false); |
480 | if (!BufferIsValid(mapBuffer)) |
481 | { |
482 | /* nothing to do, the file was already smaller */ |
483 | return; |
484 | } |
485 | |
486 | page = BufferGetPage(mapBuffer); |
487 | map = PageGetContents(page); |
488 | |
489 | LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE); |
490 | |
491 | /* NO EREPORT(ERROR) from here till changes are logged */ |
492 | START_CRIT_SECTION(); |
493 | |
494 | /* Clear out the unwanted bytes. */ |
495 | MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1)); |
496 | |
497 | /*---- |
498 | * Mask out the unwanted bits of the last remaining byte. |
499 | * |
500 | * ((1 << 0) - 1) = 00000000 |
501 | * ((1 << 1) - 1) = 00000001 |
502 | * ... |
503 | * ((1 << 6) - 1) = 00111111 |
504 | * ((1 << 7) - 1) = 01111111 |
505 | *---- |
506 | */ |
507 | map[truncByte] &= (1 << truncOffset) - 1; |
508 | |
509 | /* |
		 * Truncation of a relation is WAL-logged at a higher level, and we
		 * will be called during WAL replay.  But if checksums are enabled, we
		 * still need to write a WAL record to protect against a torn page, if
		 * the page is flushed to disk before the truncation WAL record.  We
		 * cannot use MarkBufferDirtyHint here, because that will not dirty
		 * the page during recovery.
516 | */ |
517 | MarkBufferDirty(mapBuffer); |
518 | if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded()) |
519 | log_newpage_buffer(mapBuffer, false); |
520 | |
521 | END_CRIT_SECTION(); |
522 | |
523 | UnlockReleaseBuffer(mapBuffer); |
524 | } |
525 | else |
526 | newnblocks = truncBlock; |
527 | |
528 | if (smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM) <= newnblocks) |
529 | { |
530 | /* nothing to do, the file was already smaller than requested size */ |
531 | return; |
532 | } |
533 | |
534 | /* Truncate the unused VM pages, and send smgr inval message */ |
535 | smgrtruncate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, newnblocks); |
536 | |
537 | /* |
538 | * We might as well update the local smgr_vm_nblocks setting. smgrtruncate |
539 | * sent an smgr cache inval message, which will cause other backends to |
540 | * invalidate their copy of smgr_vm_nblocks, and this one too at the next |
541 | * command boundary. But this ensures it isn't outright wrong until then. |
542 | */ |
543 | if (rel->rd_smgr) |
544 | rel->rd_smgr->smgr_vm_nblocks = newnblocks; |
545 | } |
546 | |
547 | /* |
548 | * Read a visibility map page. |
549 | * |
 * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
 * true, in which case the visibility map file is extended first.
552 | */ |
553 | static Buffer |
554 | vm_readbuf(Relation rel, BlockNumber blkno, bool extend) |
555 | { |
556 | Buffer buf; |
557 | |
558 | /* |
559 | * We might not have opened the relation at the smgr level yet, or we |
560 | * might have been forced to close it by a sinval message. The code below |
561 | * won't necessarily notice relation extension immediately when extend = |
562 | * false, so we rely on sinval messages to ensure that our ideas about the |
563 | * size of the map aren't too far out of date. |
564 | */ |
565 | RelationOpenSmgr(rel); |
566 | |
567 | /* |
568 | * If we haven't cached the size of the visibility map fork yet, check it |
569 | * first. |
570 | */ |
571 | if (rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber) |
572 | { |
573 | if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM)) |
574 | rel->rd_smgr->smgr_vm_nblocks = smgrnblocks(rel->rd_smgr, |
575 | VISIBILITYMAP_FORKNUM); |
576 | else |
577 | rel->rd_smgr->smgr_vm_nblocks = 0; |
578 | } |
579 | |
580 | /* Handle requests beyond EOF */ |
581 | if (blkno >= rel->rd_smgr->smgr_vm_nblocks) |
582 | { |
583 | if (extend) |
584 | vm_extend(rel, blkno + 1); |
585 | else |
586 | return InvalidBuffer; |
587 | } |
588 | |
589 | /* |
590 | * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's |
591 | * always safe to clear bits, so it's better to clear corrupt pages than |
592 | * error out. |
593 | * |
594 | * The initialize-the-page part is trickier than it looks, because of the |
595 | * possibility of multiple backends doing this concurrently, and our |
596 | * desire to not uselessly take the buffer lock in the normal path where |
597 | * the page is OK. We must take the lock to initialize the page, so |
598 | * recheck page newness after we have the lock, in case someone else |
599 | * already did it. Also, because we initially check PageIsNew with no |
600 | * lock, it's possible to fall through and return the buffer while someone |
601 | * else is still initializing the page (i.e., we might see pd_upper as set |
602 | * but other page header fields are still zeroes). This is harmless for |
603 | * callers that will take a buffer lock themselves, but some callers |
604 | * inspect the page without any lock at all. The latter is OK only so |
605 | * long as it doesn't depend on the page header having correct contents. |
606 | * Current usage is safe because PageGetContents() does not require that. |
607 | */ |
608 | buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno, |
609 | RBM_ZERO_ON_ERROR, NULL); |
610 | if (PageIsNew(BufferGetPage(buf))) |
611 | { |
612 | LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); |
613 | if (PageIsNew(BufferGetPage(buf))) |
614 | PageInit(BufferGetPage(buf), BLCKSZ, 0); |
615 | LockBuffer(buf, BUFFER_LOCK_UNLOCK); |
616 | } |
617 | return buf; |
618 | } |
619 | |
620 | /* |
621 | * Ensure that the visibility map fork is at least vm_nblocks long, extending |
622 | * it if necessary with zeroed pages. |
623 | */ |
624 | static void |
625 | vm_extend(Relation rel, BlockNumber vm_nblocks) |
626 | { |
627 | BlockNumber vm_nblocks_now; |
628 | PGAlignedBlock pg; |
629 | |
630 | PageInit((Page) pg.data, BLCKSZ, 0); |
631 | |
632 | /* |
633 | * We use the relation extension lock to lock out other backends trying to |
634 | * extend the visibility map at the same time. It also locks out extension |
635 | * of the main fork, unnecessarily, but extending the visibility map |
636 | * happens seldom enough that it doesn't seem worthwhile to have a |
637 | * separate lock tag type for it. |
638 | * |
639 | * Note that another backend might have extended or created the relation |
640 | * by the time we get the lock. |
641 | */ |
642 | LockRelationForExtension(rel, ExclusiveLock); |
643 | |
644 | /* Might have to re-open if a cache flush happened */ |
645 | RelationOpenSmgr(rel); |
646 | |
647 | /* |
	 * Create the file first if it doesn't exist.  If smgr_vm_nblocks is
	 * positive then it must exist; there's no need for an smgrexists call.
650 | */ |
651 | if ((rel->rd_smgr->smgr_vm_nblocks == 0 || |
652 | rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber) && |
653 | !smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM)) |
654 | smgrcreate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, false); |
655 | |
656 | vm_nblocks_now = smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM); |
657 | |
658 | /* Now extend the file */ |
659 | while (vm_nblocks_now < vm_nblocks) |
660 | { |
661 | PageSetChecksumInplace((Page) pg.data, vm_nblocks_now); |
662 | |
663 | smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now, |
664 | pg.data, false); |
665 | vm_nblocks_now++; |
666 | } |
667 | |
668 | /* |
669 | * Send a shared-inval message to force other backends to close any smgr |
670 | * references they may have for this rel, which we are about to change. |
671 | * This is a useful optimization because it means that backends don't have |
672 | * to keep checking for creation or extension of the file, which happens |
673 | * infrequently. |
674 | */ |
675 | CacheInvalidateSmgr(rel->rd_smgr->smgr_rnode); |
676 | |
677 | /* Update local cache with the up-to-date size */ |
678 | rel->rd_smgr->smgr_vm_nblocks = vm_nblocks_now; |
679 | |
680 | UnlockRelationForExtension(rel, ExclusiveLock); |
681 | } |
682 | |