/*-------------------------------------------------------------------------
 *
 * heapam_xlog.h
 *	  POSTGRES heap access XLOG definitions.
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/heapam_xlog.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef HEAPAM_XLOG_H
#define HEAPAM_XLOG_H

#include "access/htup.h"
#include "access/xlogreader.h"
#include "lib/stringinfo.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"


/*
 * WAL record definitions for heapam.c's WAL operations
 *
 * XLOG allows us to store some information in the high 4 bits of a log
 * record's xl_info field.  We use 3 of those bits for the opcode and 1 for
 * the init bit.
 */
#define XLOG_HEAP_INSERT 0x00
#define XLOG_HEAP_DELETE 0x10
#define XLOG_HEAP_UPDATE 0x20
#define XLOG_HEAP_TRUNCATE 0x30
#define XLOG_HEAP_HOT_UPDATE 0x40
#define XLOG_HEAP_CONFIRM 0x50
#define XLOG_HEAP_LOCK 0x60
#define XLOG_HEAP_INPLACE 0x70

#define XLOG_HEAP_OPMASK 0x70
/*
 * When we insert the 1st item on a new page in INSERT, UPDATE, HOT_UPDATE,
 * or MULTI_INSERT, we can (and we do) restore the entire page in redo
 */
#define XLOG_HEAP_INIT_PAGE 0x80
/*
 * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
 * are associated with RM_HEAP2_ID, but are not logically different from
 * the ones above associated with RM_HEAP_ID. XLOG_HEAP_OPMASK applies to
 * these, too.
 */
#define XLOG_HEAP2_REWRITE 0x00
#define XLOG_HEAP2_CLEAN 0x10
#define XLOG_HEAP2_FREEZE_PAGE 0x20
#define XLOG_HEAP2_CLEANUP_INFO 0x30
#define XLOG_HEAP2_VISIBLE 0x40
#define XLOG_HEAP2_MULTI_INSERT 0x50
#define XLOG_HEAP2_LOCK_UPDATED 0x60
#define XLOG_HEAP2_NEW_CID 0x70
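
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * how a redo or decode routine can split the heap-related part of an
 * xl_info byte into the opcode and the init-page bit described above.
 * The same mask works for RM_HEAP_ID and RM_HEAP2_ID records.
 */
static inline uint8
heap_xlog_opcode_sketch(uint8 xl_info, bool *init_page)
{
	*init_page = (xl_info & XLOG_HEAP_INIT_PAGE) != 0;
	return xl_info & XLOG_HEAP_OPMASK;
}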

/*
 * xl_heap_insert/xl_heap_multi_insert flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_INSERT_ALL_VISIBLE_CLEARED (1<<0)
#define XLH_INSERT_LAST_IN_MULTI (1<<1)
#define XLH_INSERT_IS_SPECULATIVE (1<<2)
#define XLH_INSERT_CONTAINS_NEW_TUPLE (1<<3)

/*
 * xl_heap_update flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED (1<<0)
/* PD_ALL_VISIBLE was cleared in the 2nd page */
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED (1<<1)
#define XLH_UPDATE_CONTAINS_OLD_TUPLE (1<<2)
#define XLH_UPDATE_CONTAINS_OLD_KEY (1<<3)
#define XLH_UPDATE_CONTAINS_NEW_TUPLE (1<<4)
#define XLH_UPDATE_PREFIX_FROM_OLD (1<<5)
#define XLH_UPDATE_SUFFIX_FROM_OLD (1<<6)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_UPDATE_CONTAINS_OLD \
	(XLH_UPDATE_CONTAINS_OLD_TUPLE | XLH_UPDATE_CONTAINS_OLD_KEY)

/*
 * xl_heap_delete flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_DELETE_ALL_VISIBLE_CLEARED (1<<0)
#define XLH_DELETE_CONTAINS_OLD_TUPLE (1<<1)
#define XLH_DELETE_CONTAINS_OLD_KEY (1<<2)
#define XLH_DELETE_IS_SUPER (1<<3)
#define XLH_DELETE_IS_PARTITION_MOVE (1<<4)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_DELETE_CONTAINS_OLD \
	(XLH_DELETE_CONTAINS_OLD_TUPLE | XLH_DELETE_CONTAINS_OLD_KEY)

/* This is what we need to know about delete */
typedef struct xl_heap_delete
{
	TransactionId xmax;		/* xmax of the deleted tuple */
	OffsetNumber offnum;	/* deleted tuple's offset */
	uint8 infobits_set;		/* infomask bits */
	uint8 flags;
} xl_heap_delete;

#define SizeOfHeapDelete (offsetof(xl_heap_delete, flags) + sizeof(uint8))

/*
 * xl_heap_truncate flag values, 8 bits are available.
 */
#define XLH_TRUNCATE_CASCADE (1<<0)
#define XLH_TRUNCATE_RESTART_SEQS (1<<1)

/*
 * For truncate we list all truncated relids in an array, followed by all
 * sequence relids that need to be restarted, if any.
 * All rels are always within the same database, so we just list dbid once.
 */
typedef struct xl_heap_truncate
{
	Oid dbId;
	uint32 nrelids;
	uint8 flags;
	Oid relids[FLEXIBLE_ARRAY_MEMBER];
} xl_heap_truncate;

#define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids))

/*
 * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted
 * or updated tuple in WAL; we can save a few bytes by reconstructing the
 * fields that are available elsewhere in the WAL record, or perhaps just
 * plain needn't be reconstructed. These are the fields we must store.
 * NOTE: t_hoff could be recomputed, but we may as well store it because
 * it will come for free due to alignment considerations.
 */
typedef struct xl_heap_header
{
	uint16 t_infomask2;
	uint16 t_infomask;
	uint8 t_hoff;
} xl_heap_header;

#define SizeOfHeapHeader (offsetof(xl_heap_header, t_hoff) + sizeof(uint8))

/* This is what we need to know about insert */
typedef struct xl_heap_insert
{
	OffsetNumber offnum;	/* inserted tuple's offset */
	uint8 flags;

	/* xl_heap_header & TUPLE DATA in backup block 0 */
} xl_heap_insert;

#define SizeOfHeapInsert (offsetof(xl_heap_insert, flags) + sizeof(uint8))

/*
 * This is what we need to know about a multi-insert.
 *
 * The main data of the record consists of this xl_heap_multi_insert header.
 * 'offsets' array is omitted if the whole page is reinitialized
 * (XLOG_HEAP_INIT_PAGE).
 *
 * In block 0's data portion, there is an xl_multi_insert_tuple struct,
 * followed by the tuple data for each tuple. There is padding to align
 * each xl_multi_insert_tuple struct.
 */
typedef struct xl_heap_multi_insert
{
	uint8 flags;
	uint16 ntuples;
	OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER];
} xl_heap_multi_insert;

#define SizeOfHeapMultiInsert offsetof(xl_heap_multi_insert, offsets)

typedef struct xl_multi_insert_tuple
{
	uint16 datalen;		/* size of tuple data that follows */
	uint16 t_infomask2;
	uint16 t_infomask;
	uint8 t_hoff;
	/* TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_multi_insert_tuple;

#define SizeOfMultiInsertTuple (offsetof(xl_multi_insert_tuple, t_hoff) + sizeof(uint8))
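
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * stepping from one xl_multi_insert_tuple in block 0's data to the next,
 * skipping the tuple data that follows it.  That the padding mentioned
 * above is SHORTALIGN() is an assumption of this sketch; see
 * heap_xlog_multi_insert() in heapam.c for the authoritative decoding.
 */
static inline xl_multi_insert_tuple *
multi_insert_next_tuple_sketch(xl_multi_insert_tuple *cur)
{
	char *next = ((char *) cur) + SizeOfMultiInsertTuple + cur->datalen;

	return (xl_multi_insert_tuple *) SHORTALIGN(next);
}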

/*
 * This is what we need to know about update|hot_update
 *
 * Backup blk 0: new page
 *
 * If the XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are
 * set, the prefix and/or suffix come first, as one or two uint16s.
 *
 * After that, xl_heap_header and new tuple data follow. The new tuple
 * data doesn't include the prefix and suffix, which are copied from the
 * old tuple on replay.
 *
 * If the XLH_UPDATE_CONTAINS_NEW_TUPLE flag is set, the tuple data is
 * included even if a full-page image was taken.
 *
 * Backup blk 1: old page, if different. (no data, just a reference to the blk)
 */
typedef struct xl_heap_update
{
	TransactionId old_xmax;		/* xmax of the old tuple */
	OffsetNumber old_offnum;	/* old tuple's offset */
	uint8 old_infobits_set;		/* infomask bits to set on old tuple */
	uint8 flags;
	TransactionId new_xmax;		/* xmax of the new tuple */
	OffsetNumber new_offnum;	/* new tuple's offset */

	/*
	 * If the XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY
	 * flags are set, an xl_heap_header struct and tuple data for the old
	 * tuple follow.
	 */
} xl_heap_update;

#define SizeOfHeapUpdate (offsetof(xl_heap_update, new_offnum) + sizeof(OffsetNumber))
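
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * reading the optional prefix/suffix lengths at the start of block 0's
 * data of an update record, as described above.  'recdata' is assumed to
 * point at the beginning of that data; the xl_heap_header for the new
 * tuple follows at the returned position.
 */
static inline char *
heap_xlog_update_prefix_suffix_sketch(const xl_heap_update *xlrec,
									  char *recdata,
									  uint16 *prefixlen, uint16 *suffixlen)
{
	*prefixlen = 0;
	*suffixlen = 0;
	if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
	{
		memcpy(prefixlen, recdata, sizeof(uint16));
		recdata += sizeof(uint16);
	}
	if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
	{
		memcpy(suffixlen, recdata, sizeof(uint16));
		recdata += sizeof(uint16);
	}
	return recdata;
}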

/*
 * This is what we need to know about vacuum page cleanup/redirect
 *
 * The array of OffsetNumbers following the fixed part of the record contains:
 *	* for each redirected item: the item offset, then the offset redirected to
 *	* for each now-dead item: the item offset
 *	* for each now-unused item: the item offset
 * The total number of OffsetNumbers is therefore 2*nredirected+ndead+nunused.
 * Note that nunused is not explicitly stored, but may be found by reference
 * to the length of block 0's data, which holds the offset arrays.
 */
typedef struct xl_heap_clean
{
	TransactionId latestRemovedXid;
	uint16 nredirected;
	uint16 ndead;
	/* OFFSET NUMBERS are in the block reference 0 */
} xl_heap_clean;

#define SizeOfHeapClean (offsetof(xl_heap_clean, ndead) + sizeof(uint16))
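
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * recovering nunused for a clean record, per the comment above.
 * 'block0_datalen' is assumed to be the length reported by
 * XLogRecGetBlockData() for block reference 0.
 */
static inline int
heap_xlog_clean_nunused_sketch(const xl_heap_clean *xlrec, Size block0_datalen)
{
	int noffsets = (int) (block0_datalen / sizeof(OffsetNumber));

	return noffsets - (2 * xlrec->nredirected + xlrec->ndead);
}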

/*
 * Cleanup_info is required in some cases during a lazy VACUUM.
 * Used for reporting the results of HeapTupleHeaderAdvanceLatestRemovedXid();
 * see vacuumlazy.c for a full explanation.
 */
typedef struct xl_heap_cleanup_info
{
	RelFileNode node;
	TransactionId latestRemovedXid;
} xl_heap_cleanup_info;

#define SizeOfHeapCleanupInfo (sizeof(xl_heap_cleanup_info))

/* flags for infobits_set */
#define XLHL_XMAX_IS_MULTI 0x01
#define XLHL_XMAX_LOCK_ONLY 0x02
#define XLHL_XMAX_EXCL_LOCK 0x04
#define XLHL_XMAX_KEYSHR_LOCK 0x08
#define XLHL_KEYS_UPDATED 0x10

/* flag bits for xl_heap_lock / xl_heap_lock_updated's flag field */
#define XLH_LOCK_ALL_FROZEN_CLEARED 0x01

/* This is what we need to know about lock */
typedef struct xl_heap_lock
{
	TransactionId locking_xid;	/* might be a MultiXactId not xid */
	OffsetNumber offnum;		/* locked tuple's offset on page */
	int8 infobits_set;			/* infomask and infomask2 bits to set */
	uint8 flags;				/* XLH_LOCK_* flag bits */
} xl_heap_lock;

#define SizeOfHeapLock (offsetof(xl_heap_lock, flags) + sizeof(int8))

/* This is what we need to know about locking an updated version of a row */
typedef struct xl_heap_lock_updated
{
	TransactionId xmax;
	OffsetNumber offnum;
	uint8 infobits_set;
	uint8 flags;
} xl_heap_lock_updated;

#define SizeOfHeapLockUpdated (offsetof(xl_heap_lock_updated, flags) + sizeof(uint8))

/* This is what we need to know about confirmation of speculative insertion */
typedef struct xl_heap_confirm
{
	OffsetNumber offnum;		/* confirmed tuple's offset on page */
} xl_heap_confirm;

#define SizeOfHeapConfirm (offsetof(xl_heap_confirm, offnum) + sizeof(OffsetNumber))

/* This is what we need to know about in-place update */
typedef struct xl_heap_inplace
{
	OffsetNumber offnum;		/* updated tuple's offset on page */
	/* TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_heap_inplace;

#define SizeOfHeapInplace (offsetof(xl_heap_inplace, offnum) + sizeof(OffsetNumber))

/*
 * This struct represents a 'freeze plan', which is what we need to know about
 * a single tuple being frozen during vacuum.
 */
/* 0x01 was XLH_FREEZE_XMIN */
#define XLH_FREEZE_XVAC 0x02
#define XLH_INVALID_XVAC 0x04

typedef struct xl_heap_freeze_tuple
{
	TransactionId xmax;
	OffsetNumber offset;
	uint16 t_infomask2;
	uint16 t_infomask;
	uint8 frzflags;
} xl_heap_freeze_tuple;

/*
 * This is what we need to know about a block being frozen during vacuum
 *
 * Backup block 0's data contains an array of xl_heap_freeze_tuple structs,
 * one for each tuple.
 */
typedef struct xl_heap_freeze_page
{
	TransactionId cutoff_xid;
	uint16 ntuples;
} xl_heap_freeze_page;

#define SizeOfHeapFreezePage (offsetof(xl_heap_freeze_page, ntuples) + sizeof(uint16))
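
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * since the freeze plans are a plain array in block 0's data, the count
 * implied by that data's length should agree with ntuples above.
 */
static inline bool
heap_xlog_freeze_count_matches_sketch(const xl_heap_freeze_page *xlrec,
									  Size block0_datalen)
{
	return block0_datalen == xlrec->ntuples * sizeof(xl_heap_freeze_tuple);
}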

/*
 * This is what we need to know about setting a visibility map bit
 *
 * Backup blk 0: visibility map buffer
 * Backup blk 1: heap buffer
 */
typedef struct xl_heap_visible
{
	TransactionId cutoff_xid;
	uint8 flags;
} xl_heap_visible;

#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8))

typedef struct xl_heap_new_cid
{
	/*
	 * store toplevel xid so we don't have to merge cids from different
	 * transactions
	 */
	TransactionId top_xid;
	CommandId cmin;
	CommandId cmax;
	CommandId combocid;			/* just for debugging */

	/*
	 * Store the relfilenode/ctid pair to facilitate lookups.
	 */
	RelFileNode target_node;
	ItemPointerData target_tid;
} xl_heap_new_cid;

#define SizeOfHeapNewCid (offsetof(xl_heap_new_cid, target_tid) + sizeof(ItemPointerData))

/* logical rewrite xlog record header */
typedef struct xl_heap_rewrite_mapping
{
	TransactionId mapped_xid;	/* xid that might need to see the row */
	Oid mapped_db;				/* DbOid or InvalidOid for shared rels */
	Oid mapped_rel;				/* Oid of the mapped relation */
	off_t offset;				/* How far have we written so far */
	uint32 num_mappings;		/* Number of in-memory mappings */
	XLogRecPtr start_lsn;		/* Insert LSN at begin of rewrite */
} xl_heap_rewrite_mapping;

extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
												   TransactionId *latestRemovedXid);

extern void heap_redo(XLogReaderState *record);
extern void heap_desc(StringInfo buf, XLogReaderState *record);
extern const char *heap_identify(uint8 info);
extern void heap_mask(char *pagedata, BlockNumber blkno);
extern void heap2_redo(XLogReaderState *record);
extern void heap2_desc(StringInfo buf, XLogReaderState *record);
extern const char *heap2_identify(uint8 info);
extern void heap_xlog_logical_rewrite(XLogReaderState *r);

extern XLogRecPtr log_heap_cleanup_info(RelFileNode rnode,
										TransactionId latestRemovedXid);
extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
								 OffsetNumber *redirected, int nredirected,
								 OffsetNumber *nowdead, int ndead,
								 OffsetNumber *nowunused, int nunused,
								 TransactionId latestRemovedXid);
extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
								  TransactionId cutoff_xid,
								  xl_heap_freeze_tuple *tuples, int ntuples);
extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
									  TransactionId relfrozenxid,
									  TransactionId relminmxid,
									  TransactionId cutoff_xid,
									  TransactionId cutoff_multi,
									  xl_heap_freeze_tuple *frz,
									  bool *totally_frozen);
extern void heap_execute_freeze_tuple(HeapTupleHeader tuple,
									  xl_heap_freeze_tuple *xlrec_tp);
extern XLogRecPtr log_heap_visible(RelFileNode rnode, Buffer heap_buffer,
								   Buffer vm_buffer, TransactionId cutoff_xid,
								   uint8 flags);

#endif							/* HEAPAM_XLOG_H */