| 1 | /*------------------------------------------------------------------------- |
| 2 | * |
| 3 | * relscan.h |
| 4 | * POSTGRES relation scan descriptor definitions. |
| 5 | * |
| 6 | * |
| 7 | * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group |
| 8 | * Portions Copyright (c) 1994, Regents of the University of California |
| 9 | * |
| 10 | * src/include/access/relscan.h |
| 11 | * |
| 12 | *------------------------------------------------------------------------- |
| 13 | */ |
| 14 | #ifndef RELSCAN_H |
| 15 | #define RELSCAN_H |
| 16 | |
| 17 | #include "access/htup_details.h" |
| 18 | #include "access/itup.h" |
| 19 | #include "port/atomics.h" |
| 20 | #include "storage/buf.h" |
| 21 | #include "storage/spin.h" |
| 22 | #include "utils/relcache.h" |
| 23 | |
| 24 | |
| 25 | struct ParallelTableScanDescData; |
| 26 | |
| 27 | /* |
 * Generic descriptor for table scans. This is the base class for table
 * scans; individual AMs embed it in their own scan descriptor structs.
| 30 | */ |
typedef struct TableScanDescData
{
	/* scan parameters (fixed at scan start) */
	Relation	rs_rd;			/* heap relation descriptor */
	struct SnapshotData *rs_snapshot;	/* snapshot to see */
	int			rs_nkeys;		/* number of scan keys */
	struct ScanKeyData *rs_key; /* array of scan key descriptors */

	/*
	 * Information about type and behaviour of the scan, a bitmask of members
	 * of the ScanOptions enum (see tableam.h).
	 */
	uint32		rs_flags;

	struct ParallelTableScanDescData *rs_parallel;	/* parallel scan
													 * information; presumably
													 * NULL for non-parallel
													 * scans — confirm in AM
													 * scan-begin code */

} TableScanDescData;
typedef struct TableScanDescData *TableScanDesc;
| 50 | |
| 51 | /* |
| 52 | * Shared state for parallel table scan. |
| 53 | * |
| 54 | * Each backend participating in a parallel table scan has its own |
| 55 | * TableScanDesc in backend-private memory, and those objects all contain a |
| 56 | * pointer to this structure. The information here must be sufficient to |
| 57 | * properly initialize each new TableScanDesc as workers join the scan, and it |
 * must tell those workers what to scan.
| 59 | */ |
typedef struct ParallelTableScanDescData
{
	Oid			phs_relid;		/* OID of relation to scan */
	bool		phs_syncscan;	/* report location to syncscan logic? */
	bool		phs_snapshot_any;	/* SnapshotAny, not phs_snapshot_data? */
	Size		phs_snapshot_off;	/* offset of the snapshot data
									 * (presumably relative to the start of
									 * this struct — confirm against the
									 * code that serializes the snapshot) */
} ParallelTableScanDescData;
typedef struct ParallelTableScanDescData *ParallelTableScanDesc;
| 68 | |
| 69 | /* |
| 70 | * Shared state for parallel table scans, for block oriented storage. |
| 71 | */ |
typedef struct ParallelBlockTableScanDescData
{
	ParallelTableScanDescData base; /* common parallel-scan state, embedded
									 * as the first member */

	BlockNumber phs_nblocks;	/* # blocks in relation at start of scan */
	slock_t		phs_mutex;		/* mutual exclusion for setting startblock */
	BlockNumber phs_startblock; /* starting block number */
	pg_atomic_uint64 phs_nallocated;	/* number of blocks allocated to
										 * workers so far. */
} ParallelBlockTableScanDescData;
typedef struct ParallelBlockTableScanDescData *ParallelBlockTableScanDesc;
| 83 | |
| 84 | /* |
 * Base class for fetches from a table via an index; individual AMs embed
 * this in the struct holding their fetch state.
| 88 | */ |
typedef struct IndexFetchTableData
{
	Relation	rel;			/* table relation the fetches read from */
} IndexFetchTableData;
| 93 | |
| 94 | /* |
| 95 | * We use the same IndexScanDescData structure for both amgettuple-based |
| 96 | * and amgetbitmap-based index scans. Some fields are only relevant in |
| 97 | * amgettuple-based scans. |
| 98 | */ |
typedef struct IndexScanDescData
{
	/* scan parameters (fixed at scan start) */
	Relation	heapRelation;	/* heap relation descriptor, or NULL */
	Relation	indexRelation;	/* index relation descriptor */
	struct SnapshotData *xs_snapshot;	/* snapshot to see */
	int			numberOfKeys;	/* number of index qualifier conditions */
	int			numberOfOrderBys;	/* number of ordering operators */
	struct ScanKeyData *keyData;	/* array of index qualifier descriptors */
	struct ScanKeyData *orderByData;	/* array of ordering op descriptors */
	bool		xs_want_itup;	/* caller requests index tuples */
	bool		xs_temp_snap;	/* unregister snapshot at scan end? */

	/* signaling to index AM about killing index tuples */
	bool		kill_prior_tuple;	/* last-returned tuple is dead */
	bool		ignore_killed_tuples;	/* do not return killed entries */
	bool		xactStartedInRecovery;	/* prevents killing/seeing killed
										 * tuples */

	/* index access method's private state */
	void	   *opaque;			/* access-method-specific info */

	/*
	 * In an index-only scan, a successful amgettuple call must fill either
	 * xs_itup (and xs_itupdesc) or xs_hitup (and xs_hitupdesc) to provide the
	 * data returned by the scan.  It can fill both, in which case the heap
	 * format will be used.
	 */
	IndexTuple	xs_itup;		/* index tuple returned by AM */
	struct TupleDescData *xs_itupdesc;	/* rowtype descriptor of xs_itup */
	HeapTuple	xs_hitup;		/* index data returned by AM, as HeapTuple */
	struct TupleDescData *xs_hitupdesc; /* rowtype descriptor of xs_hitup */

	ItemPointerData xs_heaptid; /* result: TID of the latest match */
	bool		xs_heap_continue;	/* T if must keep walking, potential
									 * further results */
	IndexFetchTableData *xs_heapfetch;	/* table-AM state for fetching the
										 * tuple at xs_heaptid (see
										 * IndexFetchTableData above) */

	bool		xs_recheck;		/* T means scan keys must be rechecked */

	/*
	 * When fetching with an ordering operator, the values of the ORDER BY
	 * expressions of the last returned tuple, according to the index.  If
	 * xs_recheckorderby is true, these need to be rechecked just like the
	 * scan keys, and the values returned here are a lower-bound on the actual
	 * values.
	 */
	Datum	   *xs_orderbyvals;	/* last tuple's ORDER BY values */
	bool	   *xs_orderbynulls;	/* null flags paired with xs_orderbyvals */
	bool		xs_recheckorderby;	/* ORDER BY values need recheck? */

	/* parallel index scan information, in shared memory */
	struct ParallelIndexScanDescData *parallel_scan;
} IndexScanDescData;
| 153 | |
| 154 | /* Generic structure for parallel scans */ |
typedef struct ParallelIndexScanDescData
{
	Oid			ps_relid;		/* OID of relation to scan */
	Oid			ps_indexid;		/* OID of index to scan */
	Size		ps_offset;		/* Offset in bytes of am specific structure */
	char		ps_snapshot_data[FLEXIBLE_ARRAY_MEMBER];	/* snapshot data,
															 * variable length */
} ParallelIndexScanDescData;
| 162 | |
| 163 | struct TupleTableSlot; |
| 164 | |
| 165 | /* Struct for storage-or-index scans of system tables */ |
typedef struct SysScanDescData
{
	Relation	heap_rel;		/* catalog being scanned */
	Relation	irel;			/* NULL if doing heap scan */
	struct TableScanDescData *scan; /* only valid in storage-scan case */
	struct IndexScanDescData *iscan;	/* only valid in index-scan case */
	struct SnapshotData *snapshot;	/* snapshot to unregister at end of scan */
	struct TupleTableSlot *slot;	/* slot holding the current tuple —
									 * presumably reused across fetches;
									 * confirm with the systable_* callers */
} SysScanDescData;
| 175 | |
| 176 | #endif /* RELSCAN_H */ |
| 177 | |