1 | /*------------------------------------------------------------------------- |
2 | * slot.h |
3 | * Replication slot management. |
4 | * |
5 | * Copyright (c) 2012-2019, PostgreSQL Global Development Group |
6 | * |
7 | *------------------------------------------------------------------------- |
8 | */ |
9 | #ifndef SLOT_H |
10 | #define SLOT_H |
11 | |
12 | #include "fmgr.h" |
13 | #include "access/xlog.h" |
14 | #include "access/xlogreader.h" |
15 | #include "storage/condition_variable.h" |
16 | #include "storage/lwlock.h" |
17 | #include "storage/shmem.h" |
18 | #include "storage/spin.h" |
19 | |
20 | /* |
21 | * Behaviour of replication slots, upon release or crash. |
22 | * |
23 | * Slots marked as PERSISTENT are crash-safe and will not be dropped when |
24 | * released. Slots marked as EPHEMERAL will be dropped when released or after |
25 | * restarts. Slots marked TEMPORARY will be dropped at the end of a session |
26 | * or on error. |
27 | * |
28 | * EPHEMERAL is used as a not-quite-ready state when creating persistent |
29 | * slots. EPHEMERAL slots can be made PERSISTENT by calling |
30 | * ReplicationSlotPersist(). For a slot that goes away at the end of a |
31 | * session, TEMPORARY is the appropriate choice. |
32 | */ |
typedef enum ReplicationSlotPersistency
{
	RS_PERSISTENT,				/* crash-safe; survives release and restarts */
	RS_EPHEMERAL,				/* dropped on release or after a restart */
	RS_TEMPORARY				/* dropped at session end or on error */
} ReplicationSlotPersistency;
39 | |
40 | /* |
41 | * On-Disk data of a replication slot, preserved across restarts. |
42 | */ |
typedef struct ReplicationSlotPersistentData
{
	/* The slot's identifier */
	NameData name;

	/* database the slot is active on (InvalidOid for physical slots) */
	Oid database;

	/*
	 * The slot's behaviour when being dropped (or restored after a crash).
	 */
	ReplicationSlotPersistency persistency;

	/*
	 * xmin horizon for data
	 *
	 * NB: This may represent a value that hasn't been written to disk yet;
	 * see notes for effective_xmin, below.
	 */
	TransactionId xmin;

	/*
	 * xmin horizon for catalog tuples
	 *
	 * NB: This may represent a value that hasn't been written to disk yet;
	 * see notes for effective_xmin, below.
	 */
	TransactionId catalog_xmin;

	/* oldest LSN that might be required by this replication slot */
	XLogRecPtr restart_lsn;

	/*
	 * Oldest LSN that the client has acked receipt for.  This is used as the
	 * start_lsn point in case the client doesn't specify one, and also as a
	 * safety measure to jump forwards in case the client specifies a
	 * start_lsn that's further in the past than this value.
	 */
	XLogRecPtr confirmed_flush;

	/* plugin name (used by logical slots to pick the output plugin) */
	NameData plugin;
} ReplicationSlotPersistentData;
86 | |
87 | /* |
88 | * Shared memory state of a single replication slot. |
89 | * |
90 | * The in-memory data of replication slots follows a locking model based |
91 | * on two linked concepts: |
92 | * - A replication slot's in_use flag is switched when added or discarded using |
 * the LWLock ReplicationSlotControlLock, which needs to be held in exclusive
94 | * mode when updating the flag by the backend owning the slot and doing the |
95 | * operation, while readers (concurrent backends not owning the slot) need |
96 | * to hold it in shared mode when looking at replication slot data. |
97 | * - Individual fields are protected by mutex where only the backend owning |
98 | * the slot is authorized to update the fields from its own slot. The |
99 | * backend owning the slot does not need to take this lock when reading its |
100 | * own fields, while concurrent backends not owning this slot should take the |
101 | * lock when reading this slot's data. |
102 | */ |
typedef struct ReplicationSlot
{
	/* lock, on same cacheline as effective_xmin */
	slock_t mutex;

	/* is this slot defined */
	bool in_use;

	/* Who is streaming out changes for this slot? 0 in unused slots. */
	pid_t active_pid;

	/*
	 * any outstanding modifications?
	 *
	 * just_dirtied is reset once the in-memory state has been written out,
	 * while dirty tracks whether a flush to disk is still pending —
	 * NOTE(review): inferred from the save/mark-dirty API below; confirm
	 * against ReplicationSlotSave()/ReplicationSlotMarkDirty().
	 */
	bool just_dirtied;
	bool dirty;

	/*
	 * For logical decoding, it's extremely important that we never remove any
	 * data that's still needed for decoding purposes, even after a crash;
	 * otherwise, decoding will produce wrong answers.  Ordinary streaming
	 * replication also needs to prevent old row versions from being removed
	 * too soon, but the worst consequence we might encounter there is
	 * unwanted query cancellations on the standby.  Thus, for logical
	 * decoding, this value represents the latest xmin that has actually been
	 * written to disk, whereas for streaming replication, it's just the same
	 * as the persistent value (data.xmin).
	 */
	TransactionId effective_xmin;
	TransactionId effective_catalog_xmin;

	/* data surviving shutdowns and crashes */
	ReplicationSlotPersistentData data;

	/* is somebody performing io on this slot? */
	LWLock io_in_progress_lock;

	/* Condition variable signalled when active_pid changes */
	ConditionVariable active_cv;

	/* all the remaining data is only used for logical slots */

	/*
	 * When the client has confirmed flushes >= candidate_xmin_lsn we can
	 * advance the catalog xmin.  When restart_valid has been passed,
	 * restart_lsn can be increased.
	 */
	TransactionId candidate_catalog_xmin;
	XLogRecPtr candidate_xmin_lsn;
	XLogRecPtr candidate_restart_valid;
	XLogRecPtr candidate_restart_lsn;
} ReplicationSlot;
153 | |
/*
 * A slot is "physical" when it is not tied to a database; logical slots
 * record the database they decode from in data.database.
 *
 * Parenthesize the macro parameter so that arguments containing
 * lower-precedence operators (e.g. a conditional expression) expand
 * correctly (CERT PRE01-C).
 */
#define SlotIsPhysical(slot) ((slot)->data.database == InvalidOid)
#define SlotIsLogical(slot) ((slot)->data.database != InvalidOid)
156 | |
157 | /* |
158 | * Shared memory control area for all of replication slots. |
159 | */ |
typedef struct ReplicationSlotCtlData
{
	/*
	 * This array should be declared [FLEXIBLE_ARRAY_MEMBER], but for some
	 * reason you can't do that in an otherwise-empty struct.
	 *
	 * NOTE(review): the actual number of usable entries is presumably the
	 * max_replication_slots GUC, with the whole array living in shared
	 * memory (see ReplicationSlotsShmemSize/Init) — confirm in slot.c.
	 */
	ReplicationSlot replication_slots[1];
} ReplicationSlotCtlData;
168 | |
169 | /* |
170 | * Pointers to shared memory |
171 | */ |
172 | extern PGDLLIMPORT ReplicationSlotCtlData *ReplicationSlotCtl; |
173 | extern PGDLLIMPORT ReplicationSlot *MyReplicationSlot; |
174 | |
175 | /* GUCs */ |
176 | extern PGDLLIMPORT int max_replication_slots; |
177 | |
178 | /* shmem initialization functions */ |
179 | extern Size ReplicationSlotsShmemSize(void); |
180 | extern void ReplicationSlotsShmemInit(void); |
181 | |
182 | /* management of individual slots */ |
183 | extern void ReplicationSlotCreate(const char *name, bool db_specific, |
184 | ReplicationSlotPersistency p); |
185 | extern void ReplicationSlotPersist(void); |
186 | extern void ReplicationSlotDrop(const char *name, bool nowait); |
187 | |
188 | extern void ReplicationSlotAcquire(const char *name, bool nowait); |
189 | extern void ReplicationSlotRelease(void); |
190 | extern void ReplicationSlotCleanup(void); |
191 | extern void ReplicationSlotSave(void); |
192 | extern void ReplicationSlotMarkDirty(void); |
193 | |
194 | /* misc stuff */ |
195 | extern bool ReplicationSlotValidateName(const char *name, int elevel); |
196 | extern void ReplicationSlotReserveWal(void); |
197 | extern void ReplicationSlotsComputeRequiredXmin(bool already_locked); |
198 | extern void ReplicationSlotsComputeRequiredLSN(void); |
199 | extern XLogRecPtr ReplicationSlotsComputeLogicalRestartLSN(void); |
200 | extern bool ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive); |
201 | extern void ReplicationSlotsDropDBSlots(Oid dboid); |
202 | |
203 | extern void StartupReplicationSlots(void); |
204 | extern void CheckPointReplicationSlots(void); |
205 | |
206 | extern void CheckSlotRequirements(void); |
207 | |
208 | #endif /* SLOT_H */ |
209 | |