/*-------------------------------------------------------------------------
 *
 * nodeLockRows.c
 *	  Routines to handle FOR UPDATE/FOR SHARE row locking
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeLockRows.c
 *
 *-------------------------------------------------------------------------
 */
/*
 *	 INTERFACE ROUTINES
 *		ExecLockRows		- fetch locked rows
 *		ExecInitLockRows	- initialize node and subnodes
 *		ExecEndLockRows		- shutdown node and subnodes
 */
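
/*
 * A LockRows node sits near the top of the plan tree for queries that use
 * row-level locking clauses.  For example (illustrative only, with a
 * hypothetical table "accounts"):
 *
 *		SELECT * FROM accounts WHERE balance > 0 FOR UPDATE;
 *
 * plans as a LockRows node above the scan of "accounts"; ExecLockRows then
 * locks each row the subplan returns before passing it up.
 */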

#include "postgres.h"

#include "access/tableam.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "utils/rel.h"


/* ----------------------------------------------------------------
 *		ExecLockRows
 * ----------------------------------------------------------------
 */
static TupleTableSlot *			/* return: a tuple or NULL */
ExecLockRows(PlanState *pstate)
{
	LockRowsState *node = castNode(LockRowsState, pstate);
	TupleTableSlot *slot;
	EState	   *estate;
	PlanState  *outerPlan;
	bool		epq_needed;
	ListCell   *lc;

	CHECK_FOR_INTERRUPTS();

	/*
	 * get information from the node
	 */
	estate = node->ps.state;
	outerPlan = outerPlanState(node);

	/*
	 * Get next tuple from subplan, if any.
	 */
lnext:
	slot = ExecProcNode(outerPlan);

	if (TupIsNull(slot))
		return NULL;

	/* We don't need EvalPlanQual unless we get updated tuple version(s) */
	epq_needed = false;

	/*
	 * Attempt to lock the source tuple(s).  (Note we only have locking
	 * rowmarks in lr_arowMarks.)
	 */
	foreach(lc, node->lr_arowMarks)
	{
		ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
		ExecRowMark *erm = aerm->rowmark;
		Datum		datum;
		bool		isNull;
		ItemPointerData tid;
		TM_FailureData tmfd;
		LockTupleMode lockmode;
		int			lockflags = 0;
		TM_Result	test;
		TupleTableSlot *markSlot;

		/* clear any leftover test tuple for this rel */
		markSlot = EvalPlanQualSlot(&node->lr_epqstate, erm->relation, erm->rti);
		ExecClearTuple(markSlot);

		/* if child rel, must check whether it produced this row */
		if (erm->rti != erm->prti)
		{
			Oid			tableoid;

			datum = ExecGetJunkAttribute(slot,
										 aerm->toidAttNo,
										 &isNull);
			/* shouldn't ever get a null result... */
			if (isNull)
				elog(ERROR, "tableoid is NULL");
			tableoid = DatumGetObjectId(datum);

			Assert(OidIsValid(erm->relid));
			if (tableoid != erm->relid)
			{
				/* this child is inactive right now */
				erm->ermActive = false;
				ItemPointerSetInvalid(&(erm->curCtid));
				ExecClearTuple(markSlot);
				continue;
			}
		}
		erm->ermActive = true;

		/* fetch the tuple's ctid */
		datum = ExecGetJunkAttribute(slot,
									 aerm->ctidAttNo,
									 &isNull);
		/* shouldn't ever get a null result... */
		if (isNull)
			elog(ERROR, "ctid is NULL");

		/* requests for foreign tables must be passed to their FDW */
		if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		{
			FdwRoutine *fdwroutine;
			bool		updated = false;

			fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
			/* this should have been checked already, but let's be safe */
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(erm->relation))));

			fdwroutine->RefetchForeignRow(estate,
										  erm,
										  datum,
										  markSlot,
										  &updated);
			if (TupIsNull(markSlot))
			{
				/* couldn't get the lock, so skip this row */
				goto lnext;
			}

			/*
			 * If the FDW says the tuple was updated before getting locked,
			 * we need to perform EPQ testing to see if the quals are still
			 * satisfied.
			 */
			if (updated)
				epq_needed = true;

			continue;
		}

		/* okay, try to lock (and fetch) the tuple */
		tid = *((ItemPointer) DatumGetPointer(datum));
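
		/*
		 * Map the plan-time rowmark type to a tuple lock mode.  The
		 * correspondence follows the query's locking clause: FOR UPDATE
		 * takes LockTupleExclusive, FOR NO KEY UPDATE takes
		 * LockTupleNoKeyExclusive, FOR SHARE takes LockTupleShare, and
		 * FOR KEY SHARE takes LockTupleKeyShare.
		 */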
		switch (erm->markType)
		{
			case ROW_MARK_EXCLUSIVE:
				lockmode = LockTupleExclusive;
				break;
			case ROW_MARK_NOKEYEXCLUSIVE:
				lockmode = LockTupleNoKeyExclusive;
				break;
			case ROW_MARK_SHARE:
				lockmode = LockTupleShare;
				break;
			case ROW_MARK_KEYSHARE:
				lockmode = LockTupleKeyShare;
				break;
			default:
				elog(ERROR, "unsupported rowmark type");
				lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
				break;
		}

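		/*
		 * Decide how table_tuple_lock may handle concurrent changes.  In
		 * READ COMMITTED mode we ask it to chase the update chain and lock
		 * the latest tuple version (TUPLE_LOCK_FLAG_FIND_LAST_VERSION);
		 * under snapshot isolation we omit that flag, so a concurrent
		 * update or delete is reported back and raises a serialization
		 * failure below.
		 */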
		lockflags = TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS;
		if (!IsolationUsesXactSnapshot())
			lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;

		test = table_tuple_lock(erm->relation, &tid, estate->es_snapshot,
								markSlot, estate->es_output_cid,
								lockmode, erm->waitPolicy,
								lockflags,
								&tmfd);

		switch (test)
		{
			case TM_WouldBlock:
				/* couldn't lock tuple in SKIP LOCKED mode */
				goto lnext;

			case TM_SelfModified:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  We *must* ignore the tuple in the former
				 * case, so as to avoid the "Halloween problem" of repeated
				 * update attempts.  In the latter case it might be sensible
				 * to fetch the updated tuple instead, but doing so would
				 * require changing heap_update and heap_delete to not
				 * complain about updating "invisible" tuples, which seems
				 * pretty scary (table_tuple_lock will not complain, but few
				 * callers expect TM_Invisible, and we're not one of them).
				 * So for now, treat the tuple as deleted and do not process.
				 */
				goto lnext;

			case TM_Ok:

				/*
				 * Got the lock successfully; the locked tuple is saved in
				 * markSlot for the EvalPlanQual testing below, if that
				 * proves necessary (i.e., if we had to follow an update
				 * chain to lock the latest tuple version).
				 */
				if (tmfd.traversed)
					epq_needed = true;
				break;

			case TM_Updated:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				elog(ERROR, "unexpected table_tuple_lock status: %u",
					 test);
				break;

			case TM_Deleted:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent delete")));
				/* tuple was deleted so don't return it */
				goto lnext;

			case TM_Invisible:
				elog(ERROR, "attempted to lock invisible tuple");
				break;

			default:
				elog(ERROR, "unrecognized table_tuple_lock status: %u",
					 test);
		}

		/* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
		erm->curCtid = tid;
	}

	/*
	 * If we need to do EvalPlanQual testing, do so.
	 */
	if (epq_needed)
	{
		/* Initialize EPQ machinery */
		EvalPlanQualBegin(&node->lr_epqstate);

		/*
		 * To fetch non-locked source rows the EPQ logic needs to access junk
		 * columns from the tuple being tested.
		 */
		EvalPlanQualSetSlot(&node->lr_epqstate, slot);

		/*
		 * And finally we can re-evaluate the tuple.
		 */
		slot = EvalPlanQualNext(&node->lr_epqstate);
		if (TupIsNull(slot))
		{
			/* Updated tuple fails qual, so ignore it and go on */
			goto lnext;
		}
	}

	/* Got all locks, so return the current tuple */
	return slot;
}
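
/*
 * Worked example of the EvalPlanQual path above (illustrative only, with a
 * hypothetical table "accounts"): under READ COMMITTED, suppose session 1
 * runs
 *
 *		SELECT * FROM accounts WHERE balance > 0 FOR UPDATE;
 *
 * and, between session 1's scan and its lock attempt, session 2 commits
 *
 *		UPDATE accounts SET balance = 0 WHERE id = 42;
 *
 * table_tuple_lock then locks the updated version of row 42 (setting
 * tmfd.traversed), and EvalPlanQualNext re-checks "balance > 0" against
 * that version; since the new balance is 0, the row is silently skipped.
 * Under REPEATABLE READ or SERIALIZABLE the same scenario instead raises a
 * serialization failure, per the TM_Updated/TM_Deleted cases above.
 */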

/* ----------------------------------------------------------------
 *		ExecInitLockRows
 *
 *		This initializes the LockRows node state structures and
 *		the node's subplan.
 * ----------------------------------------------------------------
 */
LockRowsState *
ExecInitLockRows(LockRows *node, EState *estate, int eflags)
{
	LockRowsState *lrstate;
	Plan	   *outerPlan = outerPlan(node);
	List	   *epq_arowmarks;
	ListCell   *lc;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * create state structure
	 */
	lrstate = makeNode(LockRowsState);
	lrstate->ps.plan = (Plan *) node;
	lrstate->ps.state = estate;
	lrstate->ps.ExecProcNode = ExecLockRows;

	/*
	 * Miscellaneous initialization
	 *
	 * LockRows nodes never call ExecQual or ExecProject, therefore no
	 * ExprContext is needed.
	 */

	/*
	 * Initialize result type.
	 */
	ExecInitResultTypeTL(&lrstate->ps);

	/*
	 * then initialize outer plan
	 */
	outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);

	/* node returns unmodified slots from the outer plan */
	lrstate->ps.resultopsset = true;
	lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate),
												 &lrstate->ps.resultopsfixed);

	/*
	 * LockRows nodes do no projections, so initialize projection info for
	 * this node appropriately
	 */
	lrstate->ps.ps_ProjInfo = NULL;

	/*
	 * Locate the ExecRowMark(s) that this node is responsible for, and
	 * construct ExecAuxRowMarks for them.  (InitPlan should already have
	 * built the global list of ExecRowMarks.)
	 */
	lrstate->lr_arowMarks = NIL;
	epq_arowmarks = NIL;
	foreach(lc, node->rowMarks)
	{
		PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
		ExecRowMark *erm;
		ExecAuxRowMark *aerm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* find ExecRowMark and build ExecAuxRowMark */
		erm = ExecFindRowMark(estate, rc->rti, false);
		aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);

		/*
		 * Only locking rowmarks go into our own list.  Non-locking marks are
		 * passed off to the EvalPlanQual machinery.  This is because we don't
		 * want to bother fetching non-locked rows unless we actually have to
		 * do an EPQ recheck.
		 */
		if (RowMarkRequiresRowShareLock(erm->markType))
			lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
		else
			epq_arowmarks = lappend(epq_arowmarks, aerm);
	}
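
	/*
	 * As an example of the split made just above (illustrative only, with
	 * hypothetical tables "a" and "b"): in
	 *
	 *		SELECT * FROM a JOIN b ON a.id = b.a_id FOR UPDATE OF a;
	 *
	 * "a" gets a locking rowmark and goes into lr_arowMarks, while "b" gets
	 * a non-locking mark (typically ROW_MARK_REFERENCE, or ROW_MARK_COPY if
	 * the row can't be refetched) that is handed to the EPQ machinery so
	 * b's row can be refetched if a recheck is ever needed.
	 */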

	/* Now we have the info needed to set up EPQ state */
	EvalPlanQualInit(&lrstate->lr_epqstate, estate,
					 outerPlan, epq_arowmarks, node->epqParam);

	return lrstate;
}
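
/*
 * Minimal sketch of the node's lifecycle as the executor drives it
 * (illustrative only; real callers reach these functions through the
 * generic ExecInitNode/ExecProcNode/ExecEndNode dispatch, and "plannode"
 * here is a hypothetical LockRows plan node):
 *
 *		LockRowsState *lrstate = ExecInitLockRows(plannode, estate, eflags);
 *		TupleTableSlot *slot;
 *
 *		for (;;)
 *		{
 *			slot = ExecProcNode(&lrstate->ps);
 *			if (TupIsNull(slot))
 *				break;
 *			(the source rows behind this slot are now locked)
 *		}
 *		ExecEndLockRows(lrstate);
 */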

/* ----------------------------------------------------------------
 *		ExecEndLockRows
 *
 *		This shuts down the subplan and frees resources allocated
 *		to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndLockRows(LockRowsState *node)
{
	EvalPlanQualEnd(&node->lr_epqstate);
	ExecEndNode(outerPlanState(node));
}


void
ExecReScanLockRows(LockRowsState *node)
{
	/*
	 * If chgParam of the subnode is not null, then the subplan will be
	 * re-scanned by the first ExecProcNode.
	 */
	if (node->ps.lefttree->chgParam == NULL)
		ExecReScan(node->ps.lefttree);
}