/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorFinish()
 *	ExecutorEnd()
 *
 *	These four procedures are the external interface to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart must be called at the beginning of execution of any
 *	query plan and ExecutorEnd must always be called at the end of
 *	execution of a plan (unless it is aborted due to error).
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many tuples.
 *	In some cases ExecutorRun may be called multiple times to process all
 *	the tuples for a plan.  It is also acceptable to stop short of executing
 *	the whole plan (but only if it is a SELECT).
 *
 *	ExecutorFinish must be called after the final ExecutorRun call and
 *	before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
 *	which should also omit ExecutorRun.
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/execMain.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/pg_publication.h"
#include "commands/matview.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/nodeSubplan.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/partcache.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"


/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
                        bool use_parallel_mode,
                        CmdType operation,
                        bool sendTuples,
                        uint64 numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest,
                        bool execute_once);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
                                      Bitmapset *modifiedCols,
                                      AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
                                           TupleTableSlot *slot,
                                           TupleDesc tupdesc,
                                           Bitmapset *modifiedCols,
                                           int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);

/*
 * Note that GetAllUpdatedColumns() also exists in commands/trigger.c.  There does
 * not appear to be any good header to put it into, given the structures that
 * it uses, so we let them be duplicated.  Be sure to update both if one needs
 * to be changed, however.
 */
#define GetInsertedColumns(relinfo, estate) \
    (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->insertedCols)
#define GetUpdatedColumns(relinfo, estate) \
    (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols)
#define GetAllUpdatedColumns(relinfo, estate) \
    (bms_union(exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols, \
               exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->extraUpdatedCols))
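
/*
 * Example usage (a sketch): with a ResultRelInfo "relinfo" and the current
 * EState "estate" in scope, as in commands/trigger.c, the full set of
 * updated columns can be fetched with
 *
 *		Bitmapset  *cols = GetAllUpdatedColumns(relinfo, estate);
 *
 * Note that GetAllUpdatedColumns computes a fresh bms_union() on each call,
 * so a caller needing the set repeatedly should compute it once and reuse it.
 */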

/* end of local decls */


/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
 * only because some places use QueryDescs for utility commands).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    if (ExecutorStart_hook)
        (*ExecutorStart_hook) (queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}
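
/*
 * A minimal sketch of how a loadable plugin would use this hook; the names
 * my_ExecutorStart and prev_ExecutorStart are hypothetical and would live in
 * the extension, not in this file.  The usual convention is to chain to any
 * previously installed hook, falling back to standard_ExecutorStart:
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			... plugin-specific setup here ...
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 * and in the extension's _PG_init():
 *
 *		prev_ExecutorStart = ExecutorStart_hook;
 *		ExecutorStart_hook = my_ExecutorStart;
 */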

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     *
     * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
     * would require (a) storing the combocid hash in shared memory, rather
     * than synchronizing it just once at the start of parallelism, and (b) an
     * alternative to heap_update()'s reliance on xmax for mutual exclusion.
     * INSERT may have no such troubles, but we forbid it to simplify the
     * checks.
     *
     * We have lower-level defenses in CommandCounterIncrement and elsewhere
     * against performing unsafe operations in parallel mode, but this gives a
     * more user-friendly error message.
     */
    if ((XactReadOnly || IsInParallelMode()) &&
        !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in external parameters, if any, from queryDesc; and allocate
     * workspace for internal parameters
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->paramExecTypes != NIL)
    {
        int         nParamExec;

        nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(nParamExec * sizeof(ParamExecData));
    }

    estate->es_sourceText = queryDesc->sourceText;

    /*
     * Fill in the query environment, if any, from queryDesc.
     */
    estate->es_queryEnv = queryDesc->queryEnv;

    /*
     * If non-read-only query, set the command ID to mark output tuples with
     */
    switch (queryDesc->operation)
    {
        case CMD_SELECT:

            /*
             * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
             * tuples
             */
            if (queryDesc->plannedstmt->rowMarks != NIL ||
                queryDesc->plannedstmt->hasModifyingCTE)
                estate->es_output_cid = GetCurrentCommandId(true);

            /*
             * A SELECT without modifying CTEs can't possibly queue triggers,
             * so force skip-triggers mode.  This is just a marginal efficiency
             * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
             * all that expensive, but we might as well do it.
             */
            if (!queryDesc->plannedstmt->hasModifyingCTE)
                eflags |= EXEC_FLAG_SKIP_TRIGGERS;
            break;

        case CMD_INSERT:
        case CMD_DELETE:
        case CMD_UPDATE:
            estate->es_output_cid = GetCurrentCommandId(true);
            break;

        default:
            elog(ERROR, "unrecognized operation code: %d",
                 (int) queryDesc->operation);
            break;
    }

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
    estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
    estate->es_top_eflags = eflags;
    estate->es_instrument = queryDesc->instrument_options;
    estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;

    /*
     * Set up an AFTER-trigger statement context, unless told not to, or
     * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
     */
    if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
        AfterTriggerBeginQuery();

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.  Also note that the count limit is only applied to
 *		retrieved tuples, not for instance to those inserted/updated/deleted
 *		by a ModifyTable plan node.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, uint64 count,
            bool execute_once)
{
    if (ExecutorRun_hook)
        (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
    else
        standard_ExecutorRun(queryDesc, direction, count, execute_once);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
                     ScanDirection direction, uint64 count, bool execute_once)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);
    Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /* Allow instrumentation of Executor overall runtime */
    if (queryDesc->totaltime)
        InstrStartNode(queryDesc->totaltime);

    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->hasReturning);

    if (sendTuples)
        dest->rStartup(dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (!ScanDirectionIsNoMovement(direction))
    {
        if (execute_once && queryDesc->already_executed)
            elog(ERROR, "can't re-execute query flagged for single execution");
        queryDesc->already_executed = true;

        ExecutePlan(estate,
                    queryDesc->planstate,
                    queryDesc->plannedstmt->parallelModeNeeded,
                    operation,
                    sendTuples,
                    count,
                    direction,
                    dest,
                    execute_once);
    }

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        dest->rShutdown(dest);

    if (queryDesc->totaltime)
        InstrStopNode(queryDesc->totaltime, estate->es_processed);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorFinish
 *
 *		This routine must be called after the last ExecutorRun call.
 *		It performs cleanup such as firing AFTER triggers.  It is
 *		separate from ExecutorEnd because EXPLAIN ANALYZE needs to
 *		include these actions in the total runtime.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorFinish is called.  Such a plugin would
 *		normally call standard_ExecutorFinish().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorFinish(QueryDesc *queryDesc)
{
    if (ExecutorFinish_hook)
        (*ExecutorFinish_hook) (queryDesc);
    else
        standard_ExecutorFinish(queryDesc);
}

void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);
    Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

    /* This should be run once and only once per Executor instance */
    Assert(!estate->es_finished);

    /* Switch into per-query memory context */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /* Allow instrumentation of Executor overall runtime */
    if (queryDesc->totaltime)
        InstrStartNode(queryDesc->totaltime);

    /* Run ModifyTable nodes to completion */
    ExecPostprocessPlan(estate);

    /* Execute queued AFTER triggers, unless told not to */
    if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
        AfterTriggerEndQuery(estate);

    if (queryDesc->totaltime)
        InstrStopNode(queryDesc->totaltime, 0);

    MemoryContextSwitchTo(oldcontext);

    estate->es_finished = true;
}

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorEnd is called.  Such a plugin would
 *		normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    if (ExecutorEnd_hook)
        (*ExecutorEnd_hook) (queryDesc);
    else
        standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
     * Assert is needed because ExecutorFinish is new as of 9.1, and callers
     * might forget to call it.
     */
    Assert(estate->es_finished ||
           (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /* do away with our snapshots */
    UnregisterSnapshot(estate->es_snapshot);
    UnregisterSnapshot(estate->es_crosscheck_snapshot);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
    queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate);

    MemoryContextSwitchTo(oldcontext);
}
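
/*
 * Putting the interface routines above together: the canonical calling
 * sequence, as a caller such as the portal code would use it (a sketch;
 * error cleanup is omitted, and the QueryDesc is assumed to have been built
 * with CreateQueryDesc):
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0, true);
 *		ExecutorFinish(queryDesc);
 *		ExecutorEnd(queryDesc);
 *
 * Here count = 0 runs the plan to completion; a cursor-style caller would
 * instead pass a nonzero count with execute_once = false, possibly calling
 * ExecutorRun several times before finishing.
 */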


/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 *
 * Returns true if permissions are adequate.  If not, throws an appropriate
 * error when ereport_on_violation is true, and simply returns false otherwise.
 *
 * Note that this does NOT address row level security policies (aka: RLS).  If
 * rows will be returned to the user as a result of this permission check
 * passing, then RLS also needs to be consulted (and check_enable_rls()).
 *
 * See rewrite/rowsecurity.c.
 */
bool
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
    ListCell   *l;
    bool        result = true;

    foreach(l, rangeTable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        result = ExecCheckRTEPerms(rte);
        if (!result)
        {
            Assert(rte->rtekind == RTE_RELATION);
            if (ereport_on_violation)
                aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
                               get_rel_name(rte->relid));
            return false;
        }
    }

    if (ExecutorCheckPerms_hook)
        result = (*ExecutorCheckPerms_hook) (rangeTable,
                                             ereport_on_violation);
    return result;
}
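
/*
 * A sketch of how a plugin could use ExecutorCheckPerms_hook to layer
 * additional checks on top of the standard ACL tests (the function name
 * my_ExecCheckPerms is hypothetical):
 *
 *		static bool
 *		my_ExecCheckPerms(List *rangeTable, bool ereport_on_violation)
 *		{
 *			ListCell   *l;
 *
 *			foreach(l, rangeTable)
 *			{
 *				RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 *
 *				if (rte->rtekind != RTE_RELATION)
 *					continue;
 *				... apply plugin-specific policy to rte->relid here,
 *				... ereporting or returning false on a violation
 *			}
 *			return true;
 *		}
 *
 * Note that the hook runs only after the standard checks have passed, so it
 * can further restrict access but cannot grant what the ACLs denied.
 */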

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    AclMode     relPerms;
    AclMode     remainingPerms;
    Oid         relOid;
    Oid         userid;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked when the function is prepared for execution.  Join, subquery,
     * and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return true;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return true;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we could
     * call it once in ExecCheckRTPerms and pass the userid down from there.
     * But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, but some of the bits can be
     * satisfied from column-level rather than relation-level permissions.
     * First, remove any bits that are satisfied by relation permissions.
     */
    relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
    remainingPerms = requiredPerms & ~relPerms;
    if (remainingPerms != 0)
    {
        int         col = -1;

        /*
         * If we lack any permissions that exist only as relation permissions,
         * we can fail straight away.
         */
        if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
            return false;

        /*
         * Check to see if we have the needed privileges at column level.
         *
         * Note: failures just report a table-level error; it would be nicer
         * to report a column-level error if we have some but not all of the
         * column privileges.
         */
        if (remainingPerms & ACL_SELECT)
        {
            /*
             * When the query doesn't explicitly reference any columns (for
             * example, SELECT COUNT(*) FROM table), allow the query if we
             * have SELECT on any column of the rel, as per SQL spec.
             */
            if (bms_is_empty(rte->selectedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    return false;
            }

            while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
            {
                /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
                AttrNumber  attno = col + FirstLowInvalidHeapAttributeNumber;

                if (attno == InvalidAttrNumber)
                {
                    /* Whole-row reference, must have priv on all cols */
                    if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                  ACLMASK_ALL) != ACLCHECK_OK)
                        return false;
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, attno, userid,
                                              ACL_SELECT) != ACLCHECK_OK)
                        return false;
                }
            }
        }

        /*
         * Basically the same for the mod columns, for both INSERT and UPDATE
         * privilege as specified by remainingPerms.
         */
        if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
                                                                      userid,
                                                                      rte->insertedCols,
                                                                      ACL_INSERT))
            return false;

        if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
                                                                      userid,
                                                                      rte->updatedCols,
                                                                      ACL_UPDATE))
            return false;
    }
    return true;
}

/*
 * ExecCheckRTEPermsModified
 *		Check INSERT or UPDATE access permissions for a single RTE (these
 *		are processed uniformly).
 */
static bool
ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
                          AclMode requiredPerms)
{
    int         col = -1;

    /*
     * When the query doesn't explicitly update any columns, allow the query
     * if we have permission on any column of the rel.  This is to handle
     * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
     */
    if (bms_is_empty(modifiedCols))
    {
        if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
                                      ACLMASK_ANY) != ACLCHECK_OK)
            return false;
    }

    while ((col = bms_next_member(modifiedCols, col)) >= 0)
    {
        /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
        AttrNumber  attno = col + FirstLowInvalidHeapAttributeNumber;

        if (attno == InvalidAttrNumber)
        {
            /* whole-row reference can't happen here */
            elog(ERROR, "whole-row update is not implemented");
        }
        else
        {
            if (pg_attribute_aclcheck(relOid, attno, userid,
                                      requiredPerms) != ACLCHECK_OK)
                return false;
        }
    }
    return true;
}
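
/*
 * A worked example of the bitmapset convention used above: column numbers
 * are shifted by FirstLowInvalidHeapAttributeNumber (-7 as of this version)
 * so that system attributes and the whole-row reference (attno 0) can be
 * stored as nonnegative bit numbers.  Thus user column 3 is stored as bit
 * 3 - (-7) = 10, while bit 7 decodes to attno 0, i.e. a whole-row reference.
 */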

/*
 * Check that the query does not imply any writes to non-temp tables;
 * unless we're in parallel mode, in which case don't even allow writes
 * to temp tables.
 *
 * Note: in a Hot Standby this would need to reject writes to temp
 * tables just as we do in parallel mode; but an HS standby can't have created
 * any temp tables in the first place, so no need to check that.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * Fail if write permissions are requested in parallel mode for any table
     * (temp or non-temp); otherwise fail only for non-temp tables.
     */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
    }

    if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
        PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
}


/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable, true);

    /*
     * initialize the node's execution state
     */
    ExecInitRangeTable(estate, rangeTable);

    estate->es_plannedstmt = plannedstmt;

    /*
     * Initialize ResultRelInfo data structures, and open the result rels.
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Relation    resultRelation;

            resultRelation = ExecGetRangeTableRelation(estate,
                                                       resultRelationIndex);
            InitResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              NULL,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;

        /* es_result_relation_info is NULL except when within ModifyTable */
        estate->es_result_relation_info = NULL;

        /*
         * In the partitioned result relation case, also build ResultRelInfos
         * for all the partitioned table roots, because we will need them to
         * fire statement-level triggers, if any.
         */
        if (plannedstmt->rootResultRelations)
        {
            int         num_roots = list_length(plannedstmt->rootResultRelations);

            resultRelInfos = (ResultRelInfo *)
                palloc(num_roots * sizeof(ResultRelInfo));
            resultRelInfo = resultRelInfos;
            foreach(l, plannedstmt->rootResultRelations)
            {
                Index       resultRelIndex = lfirst_int(l);
                Relation    resultRelDesc;

                resultRelDesc = ExecGetRangeTableRelation(estate,
                                                          resultRelIndex);
                InitResultRelInfo(resultRelInfo,
                                  resultRelDesc,
                                  resultRelIndex,
                                  NULL,
                                  estate->es_instrument);
                resultRelInfo++;
            }

            estate->es_root_result_relations = resultRelInfos;
            estate->es_num_root_result_relations = num_roots;
        }
        else
        {
            estate->es_root_result_relations = NULL;
            estate->es_num_root_result_relations = 0;
        }
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
        estate->es_root_result_relations = NULL;
        estate->es_num_root_result_relations = 0;
    }

    /*
     * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
     */
    if (plannedstmt->rowMarks)
    {
        estate->es_rowmarks = (ExecRowMark **)
            palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
        foreach(l, plannedstmt->rowMarks)
        {
            PlanRowMark *rc = (PlanRowMark *) lfirst(l);
            Oid         relid;
            Relation    relation;
            ExecRowMark *erm;

            /* ignore "parent" rowmarks; they are irrelevant at runtime */
            if (rc->isParent)
                continue;

            /* get relation's OID (will produce InvalidOid if subquery) */
            relid = exec_rt_fetch(rc->rti, estate)->relid;

            /* open relation, if we need to access it for this mark type */
            switch (rc->markType)
            {
                case ROW_MARK_EXCLUSIVE:
                case ROW_MARK_NOKEYEXCLUSIVE:
                case ROW_MARK_SHARE:
                case ROW_MARK_KEYSHARE:
                case ROW_MARK_REFERENCE:
                    relation = ExecGetRangeTableRelation(estate, rc->rti);
                    break;
                case ROW_MARK_COPY:
                    /* no physical table access is required */
                    relation = NULL;
                    break;
                default:
                    elog(ERROR, "unrecognized markType: %d", rc->markType);
                    relation = NULL;    /* keep compiler quiet */
                    break;
            }

            /* Check that relation is a legal target for marking */
            if (relation)
                CheckValidRowMarkRel(relation, rc->markType);

            erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
            erm->relation = relation;
            erm->relid = relid;
            erm->rti = rc->rti;
            erm->prti = rc->prti;
            erm->rowmarkId = rc->rowmarkId;
            erm->markType = rc->markType;
            erm->strength = rc->strength;
            erm->waitPolicy = rc->waitPolicy;
            erm->ermActive = false;
            ItemPointerSetInvalid(&(erm->curCtid));
            erm->ermExtra = NULL;

            Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
                   estate->es_rowmarks[erm->rti - 1] == NULL);

            estate->es_rowmarks[erm->rti - 1] = erm;
        }
    }

    /*
     * Initialize the executor's tuple table to empty.
     */
    estate->es_tupleTable = NIL;

    /* signal that this EState is not used for EPQ */
    estate->es_epq_active = NULL;

    /*
     * Initialize private state information for each SubPlan.  We must do this
     * before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.  If
         * it is a parameterless subplan (not initplan), we suggest that it be
         * prepared to handle REWIND efficiently; otherwise there is no need.
         */
        sp_eflags = eflags
            & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the query
     * tree.  This opens files, allocates storage and leaves us ready to start
     * processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return.
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT queries need a filter if
     * there are any junk attrs in the top-level tlist.
     */
    if (operation == CMD_SELECT)
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        foreach(tlist, plan->targetlist)
        {
            TargetEntry *tle = (TargetEntry *) lfirst(tlist);

            if (tle->resjunk)
            {
                junk_filter_needed = true;
                break;
            }
        }

        if (junk_filter_needed)
        {
            JunkFilter *j;
            TupleTableSlot *slot;

            slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
            j = ExecInitJunkFilter(planstate->plan->targetlist,
                                   slot);
            estate->es_junkFilter = j;

            /* Want to return the cleaned tuple type */
            tupType = j->jf_cleanTupType;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;
}

/*
 * Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
    Relation    resultRel = resultRelInfo->ri_RelationDesc;
    TriggerDesc *trigDesc = resultRel->trigdesc;
    FdwRoutine *fdwroutine;

    switch (resultRel->rd_rel->relkind)
    {
        case RELKIND_RELATION:
        case RELKIND_PARTITIONED_TABLE:
            CheckCmdReplicaIdentity(resultRel, operation);
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRel))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRel))));
            break;
        case RELKIND_VIEW:

            /*
             * Okay only if there's a suitable INSTEAD OF trigger.  Messages
             * here should match rewriteHandler.c's rewriteTargetView, except
             * that we omit errdetail because we haven't got the information
             * handy (and given that we really shouldn't get here anyway, it's
             * not worth great exertion to get).
             */
            switch (operation)
            {
                case CMD_INSERT:
                    if (!trigDesc || !trigDesc->trig_insert_instead_row)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("cannot insert into view \"%s\"",
                                        RelationGetRelationName(resultRel)),
                                 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
                    break;
                case CMD_UPDATE:
                    if (!trigDesc || !trigDesc->trig_update_instead_row)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("cannot update view \"%s\"",
                                        RelationGetRelationName(resultRel)),
                                 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
                    break;
                case CMD_DELETE:
                    if (!trigDesc || !trigDesc->trig_delete_instead_row)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("cannot delete from view \"%s\"",
                                        RelationGetRelationName(resultRel)),
                                 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
                    break;
                default:
                    elog(ERROR, "unrecognized CmdType: %d", (int) operation);
                    break;
            }
            break;
        case RELKIND_MATVIEW:
            if (!MatViewIncrementalMaintenanceIsEnabled())
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("cannot change materialized view \"%s\"",
                                RelationGetRelationName(resultRel))));
            break;
        case RELKIND_FOREIGN_TABLE:
            /* Okay only if the FDW supports it */
            fdwroutine = resultRelInfo->ri_FdwRoutine;
            switch (operation)
            {
                case CMD_INSERT:
                    if (fdwroutine->ExecForeignInsert == NULL)
                        ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("cannot insert into foreign table \"%s\"",
                                        RelationGetRelationName(resultRel))));
                    if (fdwroutine->IsForeignRelUpdatable != NULL &&
                        (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("foreign table \"%s\" does not allow inserts",
                                        RelationGetRelationName(resultRel))));
                    break;
                case CMD_UPDATE:
                    if (fdwroutine->ExecForeignUpdate == NULL)
                        ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("cannot update foreign table \"%s\"",
                                        RelationGetRelationName(resultRel))));
                    if (fdwroutine->IsForeignRelUpdatable != NULL &&
                        (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("foreign table \"%s\" does not allow updates",
                                        RelationGetRelationName(resultRel))));
                    break;
                case CMD_DELETE:
                    if (fdwroutine->ExecForeignDelete == NULL)
                        ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("cannot delete from foreign table \"%s\"",
                                        RelationGetRelationName(resultRel))));
                    if (fdwroutine->IsForeignRelUpdatable != NULL &&
                        (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("foreign table \"%s\" does not allow deletes",
                                        RelationGetRelationName(resultRel))));
                    break;
                default:
                    elog(ERROR, "unrecognized CmdType: %d", (int) operation);
                    break;
            }
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRel))));
            break;
    }
}

/*
 * Check that a proposed rowmark target relation is a legal target
 *
 * In most cases parser and/or planner should have noticed this already, but
 * they don't cover all cases.
 */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
    FdwRoutine *fdwroutine;

    switch (rel->rd_rel->relkind)
    {
        case RELKIND_RELATION:
        case RELKIND_PARTITIONED_TABLE:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            /* Must disallow this because we don't vacuum sequences */
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot lock rows in sequence \"%s\"",
                            RelationGetRelationName(rel))));
            break;
        case RELKIND_TOASTVALUE:
            /* We could allow this, but there seems no good reason to */
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot lock rows in TOAST relation \"%s\"",
                            RelationGetRelationName(rel))));
            break;
        case RELKIND_VIEW:
            /* Should not get here; planner should have expanded the view */
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot lock rows in view \"%s\"",
                            RelationGetRelationName(rel))));
            break;
        case RELKIND_MATVIEW:
            /* Allow referencing a matview, but not actual locking clauses */
            if (markType != ROW_MARK_REFERENCE)
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("cannot lock rows in materialized view \"%s\"",
                                RelationGetRelationName(rel))));
            break;
        case RELKIND_FOREIGN_TABLE:
            /* Okay only if the FDW supports it */
            fdwroutine = GetFdwRoutineForRelation(rel, false);
            if (fdwroutine->RefetchForeignRow == NULL)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("cannot lock rows in foreign table \"%s\"",
                                RelationGetRelationName(rel))));
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot lock rows in relation \"%s\"",
                            RelationGetRelationName(rel))));
            break;
    }
}

/*
 * Initialize ResultRelInfo data for one result relation
 *
 * Caution: before Postgres 9.1, this function included the relkind checking
 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
 * appropriate.  Be sure callers cover those needs.
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  Relation partition_root,
                  int instrument_options)
{
    List       *partition_check = NIL;

    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        resultRelInfo->ri_TrigWhenExprs = (ExprState **)
            palloc0(n * sizeof(ExprState *));
        if (instrument_options)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigWhenExprs = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
        resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
    else
        resultRelInfo->ri_FdwRoutine = NULL;

    /* The following fields are set later if needed */
    resultRelInfo->ri_FdwState = NULL;
    resultRelInfo->ri_usesFdwDirectModify = false;
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_GeneratedExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;
    resultRelInfo->ri_onConflictArbiterIndexes = NIL;
    resultRelInfo->ri_onConflict = NULL;
    resultRelInfo->ri_ReturningSlot = NULL;
    resultRelInfo->ri_TrigOldSlot = NULL;
    resultRelInfo->ri_TrigNewSlot = NULL;

    /*
     * Partition constraint, which also includes the partition constraint of
     * all the ancestors that are partitions.  Note that it will be checked
     * even in the case of tuple-routing where this table is the target leaf
     * partition, if there are any BR triggers defined on the table.  Although
     * tuple-routing implicitly preserves the partition constraint of the
     * target partition for a given row, the BR triggers may change the row
     * such that the constraint is no longer satisfied, which we must catch
     * by checking the constraint explicitly.
1338 | * |
1339 | * If this is a partitioned table, the partition constraint (if any) of a |
1340 | * given row will be checked just before performing tuple-routing. |
1341 | */ |
1342 | partition_check = RelationGetPartitionQual(resultRelationDesc); |
1343 | |
1344 | resultRelInfo->ri_PartitionCheck = partition_check; |
1345 | resultRelInfo->ri_PartitionRoot = partition_root; |
1346 | resultRelInfo->ri_PartitionInfo = NULL; /* may be set later */ |
1347 | resultRelInfo->ri_CopyMultiInsertBuffer = NULL; |
1348 | } |
1349 | |
1350 | /* |
1351 | * ExecGetTriggerResultRel |
1352 | * Get a ResultRelInfo for a trigger target relation. |
1353 | * |
1354 | * Most of the time, triggers are fired on one of the result relations of the |
1355 | * query, and so we can just return a member of the es_result_relations array, |
1356 | * or the es_root_result_relations array (if any), or the |
1357 | * es_tuple_routing_result_relations list (if any). (Note: in self-join |
1358 | * situations there might be multiple members with the same OID; if so it |
1359 | * doesn't matter which one we pick.) |
1360 | * |
1361 | * However, it is sometimes necessary to fire triggers on other relations; |
1362 | * this happens mainly when an RI update trigger queues additional triggers |
1363 | * on other relations, which will be processed in the context of the outer |
1364 | * query. For efficiency's sake, we want to have a ResultRelInfo for those |
1365 | * triggers too; that can avoid repeated re-opening of the relation. (It |
1366 | * also provides a way for EXPLAIN ANALYZE to report the runtimes of such |
1367 | * triggers.) So we make additional ResultRelInfo's as needed, and save them |
1368 | * in es_trig_target_relations. |
1369 | */ |
1370 | ResultRelInfo * |
1371 | ExecGetTriggerResultRel(EState *estate, Oid relid) |
1372 | { |
1373 | ResultRelInfo *rInfo; |
1374 | int nr; |
1375 | ListCell *l; |
1376 | Relation rel; |
1377 | MemoryContext oldcontext; |
1378 | |
1379 | /* First, search through the query result relations */ |
1380 | rInfo = estate->es_result_relations; |
1381 | nr = estate->es_num_result_relations; |
1382 | while (nr > 0) |
1383 | { |
1384 | if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) |
1385 | return rInfo; |
1386 | rInfo++; |
1387 | nr--; |
1388 | } |
1389 | /* Second, search through the root result relations, if any */ |
1390 | rInfo = estate->es_root_result_relations; |
1391 | nr = estate->es_num_root_result_relations; |
1392 | while (nr > 0) |
1393 | { |
1394 | if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) |
1395 | return rInfo; |
1396 | rInfo++; |
1397 | nr--; |
1398 | } |
1399 | |
1400 | /* |
1401 | * Third, search through the result relations that were created during |
1402 | * tuple routing, if any. |
1403 | */ |
1404 | foreach(l, estate->es_tuple_routing_result_relations) |
1405 | { |
1406 | rInfo = (ResultRelInfo *) lfirst(l); |
1407 | if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) |
1408 | return rInfo; |
1409 | } |
1410 | |
1411 | /* Nope, but maybe we already made an extra ResultRelInfo for it */ |
1412 | foreach(l, estate->es_trig_target_relations) |
1413 | { |
1414 | rInfo = (ResultRelInfo *) lfirst(l); |
1415 | if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) |
1416 | return rInfo; |
1417 | } |
1418 | /* Nope, so we need a new one */ |
1419 | |
1420 | /* |
1421 | * Open the target relation's relcache entry. We assume that an |
1422 | * appropriate lock is still held by the backend from whenever the trigger |
1423 | * event got queued, so we need take no new lock here. Also, we need not |
1424 | * recheck the relkind, so no need for CheckValidResultRel. |
1425 | */ |
1426 | rel = table_open(relid, NoLock); |
1427 | |
1428 | /* |
1429 | * Make the new entry in the right context. |
1430 | */ |
1431 | oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); |
1432 | rInfo = makeNode(ResultRelInfo); |
1433 | InitResultRelInfo(rInfo, |
1434 | rel, |
1435 | 0, /* dummy rangetable index */ |
1436 | NULL, |
1437 | estate->es_instrument); |
1438 | estate->es_trig_target_relations = |
1439 | lappend(estate->es_trig_target_relations, rInfo); |
1440 | MemoryContextSwitchTo(oldcontext); |
1441 | |
1442 | /* |
1443 | * Currently, we don't need any index information in ResultRelInfos used |
1444 | * only for triggers, so no need to call ExecOpenIndices. |
1445 | */ |
1446 | |
1447 | return rInfo; |
1448 | } |
1449 | |
1450 | /* |
1451 | * Close any relations that have been opened by ExecGetTriggerResultRel(). |
1452 | */ |
1453 | void |
1454 | ExecCleanUpTriggerState(EState *estate) |
1455 | { |
1456 | ListCell *l; |
1457 | |
1458 | foreach(l, estate->es_trig_target_relations) |
1459 | { |
1460 | ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l); |
1461 | |
1462 | /* |
1463 | * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we |
1464 | * might be issuing a duplicate close against a Relation opened by |
1465 | * ExecGetRangeTableRelation. |
1466 | */ |
1467 | Assert(resultRelInfo->ri_RangeTableIndex == 0); |
1468 | |
1469 | /* |
1470 | * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for |
1471 | * these rels, we needn't call ExecCloseIndices either. |
1472 | */ |
1473 | Assert(resultRelInfo->ri_NumIndices == 0); |
1474 | |
1475 | table_close(resultRelInfo->ri_RelationDesc, NoLock); |
1476 | } |
1477 | } |
1478 | |
1479 | /* ---------------------------------------------------------------- |
1480 | * ExecPostprocessPlan |
1481 | * |
1482 | * Give plan nodes a final chance to execute before shutdown |
1483 | * ---------------------------------------------------------------- |
1484 | */ |
1485 | static void |
1486 | ExecPostprocessPlan(EState *estate) |
1487 | { |
1488 | ListCell *lc; |
1489 | |
1490 | /* |
1491 | * Make sure nodes run forward. |
1492 | */ |
1493 | estate->es_direction = ForwardScanDirection; |
1494 | |
1495 | /* |
1496 | * Run any secondary ModifyTable nodes to completion, in case the main |
1497 | * query did not fetch all rows from them. (We do this to ensure that |
1498 | * such nodes have predictable results.) |
1499 | */ |
1500 | foreach(lc, estate->es_auxmodifytables) |
1501 | { |
1502 | PlanState *ps = (PlanState *) lfirst(lc); |
1503 | |
1504 | for (;;) |
1505 | { |
1506 | TupleTableSlot *slot; |
1507 | |
1508 | /* Reset the per-output-tuple exprcontext each time */ |
1509 | ResetPerTupleExprContext(estate); |
1510 | |
1511 | slot = ExecProcNode(ps); |
1512 | |
1513 | if (TupIsNull(slot)) |
1514 | break; |
1515 | } |
1516 | } |
1517 | } |
1518 | |
1519 | /* ---------------------------------------------------------------- |
1520 | * ExecEndPlan |
1521 | * |
1522 | * Cleans up the query plan -- closes files and frees up storage |
1523 | * |
1524 | * NOTE: we are no longer very worried about freeing storage per se |
1525 | * in this code; FreeExecutorState should be guaranteed to release all |
1526 | * memory that needs to be released. What we are worried about doing |
1527 | * is closing relations and dropping buffer pins. Thus, for example, |
1528 | * tuple tables must be cleared or dropped to ensure pins are released. |
1529 | * ---------------------------------------------------------------- |
1530 | */ |
1531 | static void |
1532 | ExecEndPlan(PlanState *planstate, EState *estate) |
1533 | { |
1534 | ResultRelInfo *resultRelInfo; |
1535 | Index num_relations; |
1536 | Index i; |
1537 | ListCell *l; |
1538 | |
1539 | /* |
1540 | * shut down the node-type-specific query processing |
1541 | */ |
1542 | ExecEndNode(planstate); |
1543 | |
1544 | /* |
1545 | * for subplans too |
1546 | */ |
1547 | foreach(l, estate->es_subplanstates) |
1548 | { |
1549 | PlanState *subplanstate = (PlanState *) lfirst(l); |
1550 | |
1551 | ExecEndNode(subplanstate); |
1552 | } |
1553 | |
1554 | /* |
1555 | * destroy the executor's tuple table. Actually we only care about |
1556 | * releasing buffer pins and tupdesc refcounts; there's no need to pfree |
1557 | * the TupleTableSlots, since the containing memory context is about to go |
1558 | * away anyway. |
1559 | */ |
1560 | ExecResetTupleTable(estate->es_tupleTable, false); |
1561 | |
1562 | /* |
1563 | * close indexes of result relation(s) if any. (Rels themselves get |
1564 | * closed next.) |
1565 | */ |
1566 | resultRelInfo = estate->es_result_relations; |
1567 | for (i = estate->es_num_result_relations; i > 0; i--) |
1568 | { |
1569 | ExecCloseIndices(resultRelInfo); |
1570 | resultRelInfo++; |
1571 | } |
1572 | |
1573 | /* |
1574 | * close whatever rangetable Relations have been opened. We do not |
1575 | * release any locks we might hold on those rels. |
1576 | */ |
1577 | num_relations = estate->es_range_table_size; |
1578 | for (i = 0; i < num_relations; i++) |
1579 | { |
1580 | if (estate->es_relations[i]) |
1581 | table_close(estate->es_relations[i], NoLock); |
1582 | } |
1583 | |
1584 | /* likewise close any trigger target relations */ |
1585 | ExecCleanUpTriggerState(estate); |
1586 | } |
1587 | |
1588 | /* ---------------------------------------------------------------- |
1589 | * ExecutePlan |
1590 | * |
1591 | * Processes the query plan until we have retrieved 'numberTuples' tuples, |
1592 | * moving in the specified direction. |
1593 | * |
 *		Runs to completion if numberTuples is 0.
1595 | * |
1596 | * Note: the ctid attribute is a 'junk' attribute that is removed before the |
1597 | * user can see it |
1598 | * ---------------------------------------------------------------- |
1599 | */ |
1600 | static void |
1601 | ExecutePlan(EState *estate, |
1602 | PlanState *planstate, |
1603 | bool use_parallel_mode, |
1604 | CmdType operation, |
1605 | bool sendTuples, |
1606 | uint64 numberTuples, |
1607 | ScanDirection direction, |
1608 | DestReceiver *dest, |
1609 | bool execute_once) |
1610 | { |
1611 | TupleTableSlot *slot; |
1612 | uint64 current_tuple_count; |
1613 | |
1614 | /* |
1615 | * initialize local variables |
1616 | */ |
1617 | current_tuple_count = 0; |
1618 | |
1619 | /* |
1620 | * Set the direction. |
1621 | */ |
1622 | estate->es_direction = direction; |
1623 | |
1624 | /* |
	 * If the plan might be executed multiple times, we must force it to run
	 * without parallelism, because we might exit early.
1627 | */ |
1628 | if (!execute_once) |
1629 | use_parallel_mode = false; |
1630 | |
1631 | estate->es_use_parallel_mode = use_parallel_mode; |
1632 | if (use_parallel_mode) |
1633 | EnterParallelMode(); |
1634 | |
1635 | /* |
1636 | * Loop until we've processed the proper number of tuples from the plan. |
1637 | */ |
1638 | for (;;) |
1639 | { |
1640 | /* Reset the per-output-tuple exprcontext */ |
1641 | ResetPerTupleExprContext(estate); |
1642 | |
1643 | /* |
1644 | * Execute the plan and obtain a tuple |
1645 | */ |
1646 | slot = ExecProcNode(planstate); |
1647 | |
1648 | /* |
		 * If the tuple is null, we assume there is nothing more to process,
		 * so we just end the loop.
1651 | */ |
1652 | if (TupIsNull(slot)) |
1653 | { |
1654 | /* |
1655 | * If we know we won't need to back up, we can release resources |
1656 | * at this point. |
1657 | */ |
1658 | if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD)) |
1659 | (void) ExecShutdownNode(planstate); |
1660 | break; |
1661 | } |
1662 | |
1663 | /* |
1664 | * If we have a junk filter, then project a new tuple with the junk |
1665 | * removed. |
1666 | * |
1667 | * Store this new "clean" tuple in the junkfilter's resultSlot. |
1668 | * (Formerly, we stored it back over the "dirty" tuple, which is WRONG |
1669 | * because that tuple slot has the wrong descriptor.) |
1670 | */ |
1671 | if (estate->es_junkFilter != NULL) |
1672 | slot = ExecFilterJunk(estate->es_junkFilter, slot); |
1673 | |
1674 | /* |
1675 | * If we are supposed to send the tuple somewhere, do so. (In |
1676 | * practice, this is probably always the case at this point.) |
1677 | */ |
1678 | if (sendTuples) |
1679 | { |
1680 | /* |
1681 | * If we are not able to send the tuple, we assume the destination |
1682 | * has closed and no more tuples can be sent. If that's the case, |
1683 | * end the loop. |
1684 | */ |
1685 | if (!dest->receiveSlot(slot, dest)) |
1686 | break; |
1687 | } |
1688 | |
1689 | /* |
1690 | * Count tuples processed, if this is a SELECT. (For other operation |
1691 | * types, the ModifyTable plan node must count the appropriate |
1692 | * events.) |
1693 | */ |
1694 | if (operation == CMD_SELECT) |
1695 | (estate->es_processed)++; |
1696 | |
1697 | /* |
		 * Check our tuple count.  If we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
1701 | */ |
1702 | current_tuple_count++; |
1703 | if (numberTuples && numberTuples == current_tuple_count) |
1704 | { |
1705 | /* |
1706 | * If we know we won't need to back up, we can release resources |
1707 | * at this point. |
1708 | */ |
1709 | if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD)) |
1710 | (void) ExecShutdownNode(planstate); |
1711 | break; |
1712 | } |
1713 | } |
1714 | |
1715 | if (use_parallel_mode) |
1716 | ExitParallelMode(); |
1717 | } |
1718 | |
1719 | |
1720 | /* |
1721 | * ExecRelCheck --- check that tuple meets constraints for result relation |
1722 | * |
1723 | * Returns NULL if OK, else name of failed check constraint |
1724 | */ |
1725 | static const char * |
1726 | ExecRelCheck(ResultRelInfo *resultRelInfo, |
1727 | TupleTableSlot *slot, EState *estate) |
1728 | { |
1729 | Relation rel = resultRelInfo->ri_RelationDesc; |
1730 | int ncheck = rel->rd_att->constr->num_check; |
1731 | ConstrCheck *check = rel->rd_att->constr->check; |
1732 | ExprContext *econtext; |
1733 | MemoryContext oldContext; |
1734 | int i; |
1735 | |
1736 | /* |
1737 | * If first time through for this result relation, build expression |
	 * node trees for rel's constraint expressions.  Keep them in the per-query
1739 | * memory context so they'll survive throughout the query. |
1740 | */ |
1741 | if (resultRelInfo->ri_ConstraintExprs == NULL) |
1742 | { |
1743 | oldContext = MemoryContextSwitchTo(estate->es_query_cxt); |
1744 | resultRelInfo->ri_ConstraintExprs = |
1745 | (ExprState **) palloc(ncheck * sizeof(ExprState *)); |
1746 | for (i = 0; i < ncheck; i++) |
1747 | { |
1748 | Expr *checkconstr; |
1749 | |
1750 | checkconstr = stringToNode(check[i].ccbin); |
1751 | resultRelInfo->ri_ConstraintExprs[i] = |
1752 | ExecPrepareExpr(checkconstr, estate); |
1753 | } |
1754 | MemoryContextSwitchTo(oldContext); |
1755 | } |
1756 | |
1757 | /* |
1758 | * We will use the EState's per-tuple context for evaluating constraint |
1759 | * expressions (creating it if it's not already there). |
1760 | */ |
1761 | econtext = GetPerTupleExprContext(estate); |
1762 | |
1763 | /* Arrange for econtext's scan tuple to be the tuple under test */ |
1764 | econtext->ecxt_scantuple = slot; |
1765 | |
1766 | /* And evaluate the constraints */ |
1767 | for (i = 0; i < ncheck; i++) |
1768 | { |
1769 | ExprState *checkconstr = resultRelInfo->ri_ConstraintExprs[i]; |
1770 | |
1771 | /* |
1772 | * NOTE: SQL specifies that a NULL result from a constraint expression |
1773 | * is not to be treated as a failure. Therefore, use ExecCheck not |
1774 | * ExecQual. |
1775 | */ |
1776 | if (!ExecCheck(checkconstr, econtext)) |
1777 | return check[i].ccname; |
1778 | } |
1779 | |
1780 | /* NULL result means no error */ |
1781 | return NULL; |
1782 | } |
1783 | |
1784 | /* |
1785 | * ExecPartitionCheck --- check that tuple meets the partition constraint. |
1786 | * |
1787 | * Returns true if it meets the partition constraint. If the constraint |
 * fails and we're asked to emit an error, do so and don't return; otherwise
1789 | * return false. |
1790 | */ |
1791 | bool |
1792 | ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, |
1793 | EState *estate, bool emitError) |
1794 | { |
1795 | ExprContext *econtext; |
1796 | bool success; |
1797 | |
1798 | /* |
1799 | * If first time through, build expression state tree for the partition |
	 * check expression.  Keep it in the per-query memory context so it will
	 * survive throughout the query.
1802 | */ |
1803 | if (resultRelInfo->ri_PartitionCheckExpr == NULL) |
1804 | { |
1805 | List *qual = resultRelInfo->ri_PartitionCheck; |
1806 | |
1807 | resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate); |
1808 | } |
1809 | |
1810 | /* |
1811 | * We will use the EState's per-tuple context for evaluating constraint |
1812 | * expressions (creating it if it's not already there). |
1813 | */ |
1814 | econtext = GetPerTupleExprContext(estate); |
1815 | |
1816 | /* Arrange for econtext's scan tuple to be the tuple under test */ |
1817 | econtext->ecxt_scantuple = slot; |
1818 | |
1819 | /* |
	 * As in the case of the catalogued constraints, we treat a NULL result as
1821 | * success here, not a failure. |
1822 | */ |
1823 | success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext); |
1824 | |
1825 | /* if asked to emit error, don't actually return on failure */ |
1826 | if (!success && emitError) |
1827 | ExecPartitionCheckEmitError(resultRelInfo, slot, estate); |
1828 | |
1829 | return success; |
1830 | } |
1831 | |
1832 | /* |
1833 | * ExecPartitionCheckEmitError - Form and emit an error message after a failed |
1834 | * partition constraint check. |
1835 | */ |
1836 | void |
1837 | ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, |
1838 | TupleTableSlot *slot, |
1839 | EState *estate) |
1840 | { |
1841 | Oid root_relid; |
1842 | TupleDesc tupdesc; |
1843 | char *val_desc; |
1844 | Bitmapset *modifiedCols; |
1845 | |
1846 | /* |
1847 | * If the tuple has been routed, it's been converted to the partition's |
1848 | * rowtype, which might differ from the root table's. We must convert it |
1849 | * back to the root table's rowtype so that val_desc in the error message |
1850 | * matches the input tuple. |
1851 | */ |
1852 | if (resultRelInfo->ri_PartitionRoot) |
1853 | { |
1854 | TupleDesc old_tupdesc; |
1855 | AttrNumber *map; |
1856 | |
1857 | root_relid = RelationGetRelid(resultRelInfo->ri_PartitionRoot); |
1858 | tupdesc = RelationGetDescr(resultRelInfo->ri_PartitionRoot); |
1859 | |
1860 | old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc); |
1861 | /* a reverse map */ |
1862 | map = convert_tuples_by_name_map_if_req(old_tupdesc, tupdesc, |
1863 | gettext_noop("could not convert row type" )); |
1864 | |
1865 | /* |
1866 | * Partition-specific slot's tupdesc can't be changed, so allocate a |
1867 | * new one. |
1868 | */ |
1869 | if (map != NULL) |
1870 | slot = execute_attr_map_slot(map, slot, |
1871 | MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); |
1872 | } |
1873 | else |
1874 | { |
1875 | root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc); |
1876 | tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc); |
1877 | } |
1878 | |
1879 | modifiedCols = bms_union(GetInsertedColumns(resultRelInfo, estate), |
1880 | GetUpdatedColumns(resultRelInfo, estate)); |
1881 | |
1882 | val_desc = ExecBuildSlotValueDescription(root_relid, |
1883 | slot, |
1884 | tupdesc, |
1885 | modifiedCols, |
1886 | 64); |
1887 | ereport(ERROR, |
1888 | (errcode(ERRCODE_CHECK_VIOLATION), |
1889 | errmsg("new row for relation \"%s\" violates partition constraint" , |
1890 | RelationGetRelationName(resultRelInfo->ri_RelationDesc)), |
1891 | val_desc ? errdetail("Failing row contains %s." , val_desc) : 0)); |
1892 | } |
1893 | |
1894 | /* |
1895 | * ExecConstraints - check constraints of the tuple in 'slot' |
1896 | * |
1897 | * This checks the traditional NOT NULL and check constraints. |
1898 | * |
1899 | * The partition constraint is *NOT* checked. |
1900 | * |
1901 | * Note: 'slot' contains the tuple to check the constraints of, which may |
1902 | * have been converted from the original input tuple after tuple routing. |
1903 | * 'resultRelInfo' is the final result relation, after tuple routing. |
1904 | */ |
1905 | void |
1906 | ExecConstraints(ResultRelInfo *resultRelInfo, |
1907 | TupleTableSlot *slot, EState *estate) |
1908 | { |
1909 | Relation rel = resultRelInfo->ri_RelationDesc; |
1910 | TupleDesc tupdesc = RelationGetDescr(rel); |
1911 | TupleConstr *constr = tupdesc->constr; |
1912 | Bitmapset *modifiedCols; |
1913 | Bitmapset *insertedCols; |
1914 | Bitmapset *updatedCols; |
1915 | |
1916 | Assert(constr || resultRelInfo->ri_PartitionCheck); |
1917 | |
1918 | if (constr && constr->has_not_null) |
1919 | { |
1920 | int natts = tupdesc->natts; |
1921 | int attrChk; |
1922 | |
1923 | for (attrChk = 1; attrChk <= natts; attrChk++) |
1924 | { |
1925 | Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1); |
1926 | |
1927 | if (att->attnotnull && slot_attisnull(slot, attrChk)) |
1928 | { |
1929 | char *val_desc; |
1930 | Relation orig_rel = rel; |
1931 | TupleDesc orig_tupdesc = RelationGetDescr(rel); |
1932 | |
1933 | /* |
1934 | * If the tuple has been routed, it's been converted to the |
1935 | * partition's rowtype, which might differ from the root |
1936 | * table's. We must convert it back to the root table's |
					 * rowtype so that val_desc shown in the error message matches the
1938 | * input tuple. |
1939 | */ |
1940 | if (resultRelInfo->ri_PartitionRoot) |
1941 | { |
1942 | AttrNumber *map; |
1943 | |
1944 | rel = resultRelInfo->ri_PartitionRoot; |
1945 | tupdesc = RelationGetDescr(rel); |
1946 | /* a reverse map */ |
1947 | map = convert_tuples_by_name_map_if_req(orig_tupdesc, |
1948 | tupdesc, |
1949 | gettext_noop("could not convert row type" )); |
1950 | |
1951 | /* |
1952 | * Partition-specific slot's tupdesc can't be changed, so |
1953 | * allocate a new one. |
1954 | */ |
1955 | if (map != NULL) |
1956 | slot = execute_attr_map_slot(map, slot, |
1957 | MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); |
1958 | } |
1959 | |
1960 | insertedCols = GetInsertedColumns(resultRelInfo, estate); |
1961 | updatedCols = GetUpdatedColumns(resultRelInfo, estate); |
1962 | modifiedCols = bms_union(insertedCols, updatedCols); |
1963 | val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), |
1964 | slot, |
1965 | tupdesc, |
1966 | modifiedCols, |
1967 | 64); |
1968 | |
1969 | ereport(ERROR, |
1970 | (errcode(ERRCODE_NOT_NULL_VIOLATION), |
1971 | errmsg("null value in column \"%s\" violates not-null constraint" , |
1972 | NameStr(att->attname)), |
1973 | val_desc ? errdetail("Failing row contains %s." , val_desc) : 0, |
1974 | errtablecol(orig_rel, attrChk))); |
1975 | } |
1976 | } |
1977 | } |
1978 | |
1979 | if (constr && constr->num_check > 0) |
1980 | { |
1981 | const char *failed; |
1982 | |
1983 | if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL) |
1984 | { |
1985 | char *val_desc; |
1986 | Relation orig_rel = rel; |
1987 | |
1988 | /* See the comment above. */ |
1989 | if (resultRelInfo->ri_PartitionRoot) |
1990 | { |
1991 | TupleDesc old_tupdesc = RelationGetDescr(rel); |
1992 | AttrNumber *map; |
1993 | |
1994 | rel = resultRelInfo->ri_PartitionRoot; |
1995 | tupdesc = RelationGetDescr(rel); |
1996 | /* a reverse map */ |
1997 | map = convert_tuples_by_name_map_if_req(old_tupdesc, |
1998 | tupdesc, |
1999 | gettext_noop("could not convert row type" )); |
2000 | |
2001 | /* |
2002 | * Partition-specific slot's tupdesc can't be changed, so |
2003 | * allocate a new one. |
2004 | */ |
2005 | if (map != NULL) |
2006 | slot = execute_attr_map_slot(map, slot, |
2007 | MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); |
2008 | } |
2009 | |
2010 | insertedCols = GetInsertedColumns(resultRelInfo, estate); |
2011 | updatedCols = GetUpdatedColumns(resultRelInfo, estate); |
2012 | modifiedCols = bms_union(insertedCols, updatedCols); |
2013 | val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), |
2014 | slot, |
2015 | tupdesc, |
2016 | modifiedCols, |
2017 | 64); |
2018 | ereport(ERROR, |
2019 | (errcode(ERRCODE_CHECK_VIOLATION), |
2020 | errmsg("new row for relation \"%s\" violates check constraint \"%s\"" , |
2021 | RelationGetRelationName(orig_rel), failed), |
2022 | val_desc ? errdetail("Failing row contains %s." , val_desc) : 0, |
2023 | errtableconstraint(orig_rel, failed))); |
2024 | } |
2025 | } |
2026 | } |
2027 | |
2028 | /* |
2029 | * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs |
2030 | * of the specified kind. |
2031 | * |
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views that have the WITH
 * CHECK OPTION set and those from row-level security policies).  See
 * ExecInsert() and ExecUpdate().
2036 | */ |
2037 | void |
2038 | ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, |
2039 | TupleTableSlot *slot, EState *estate) |
2040 | { |
2041 | Relation rel = resultRelInfo->ri_RelationDesc; |
2042 | TupleDesc tupdesc = RelationGetDescr(rel); |
2043 | ExprContext *econtext; |
2044 | ListCell *l1, |
2045 | *l2; |
2046 | |
2047 | /* |
2048 | * We will use the EState's per-tuple context for evaluating constraint |
2049 | * expressions (creating it if it's not already there). |
2050 | */ |
2051 | econtext = GetPerTupleExprContext(estate); |
2052 | |
2053 | /* Arrange for econtext's scan tuple to be the tuple under test */ |
2054 | econtext->ecxt_scantuple = slot; |
2055 | |
2056 | /* Check each of the constraints */ |
2057 | forboth(l1, resultRelInfo->ri_WithCheckOptions, |
2058 | l2, resultRelInfo->ri_WithCheckOptionExprs) |
2059 | { |
2060 | WithCheckOption *wco = (WithCheckOption *) lfirst(l1); |
2061 | ExprState *wcoExpr = (ExprState *) lfirst(l2); |
2062 | |
2063 | /* |
2064 | * Skip any WCOs which are not the kind we are looking for at this |
2065 | * time. |
2066 | */ |
2067 | if (wco->kind != kind) |
2068 | continue; |
2069 | |
2070 | /* |
2071 | * WITH CHECK OPTION checks are intended to ensure that the new tuple |
2072 | * is visible (in the case of a view) or that it passes the |
2073 | * 'with-check' policy (in the case of row security). If the qual |
2074 | * evaluates to NULL or FALSE, then the new tuple won't be included in |
2075 | * the view or doesn't pass the 'with-check' policy for the table. |
2076 | */ |
2077 | if (!ExecQual(wcoExpr, econtext)) |
2078 | { |
2079 | char *val_desc; |
2080 | Bitmapset *modifiedCols; |
2081 | Bitmapset *insertedCols; |
2082 | Bitmapset *updatedCols; |
2083 | |
2084 | switch (wco->kind) |
2085 | { |
2086 | /* |
2087 | * For WITH CHECK OPTIONs coming from views, we might be |
2088 | * able to provide the details on the row, depending on |
2089 | * the permissions on the relation (that is, if the user |
2090 | * could view it directly anyway). For RLS violations, we |
2091 | * don't include the data since we don't know if the user |
2092 | * should be able to view the tuple as that depends on the |
2093 | * USING policy. |
2094 | */ |
2095 | case WCO_VIEW_CHECK: |
2096 | /* See the comment in ExecConstraints(). */ |
2097 | if (resultRelInfo->ri_PartitionRoot) |
2098 | { |
2099 | TupleDesc old_tupdesc = RelationGetDescr(rel); |
2100 | AttrNumber *map; |
2101 | |
2102 | rel = resultRelInfo->ri_PartitionRoot; |
2103 | tupdesc = RelationGetDescr(rel); |
2104 | /* a reverse map */ |
2105 | map = convert_tuples_by_name_map_if_req(old_tupdesc, |
2106 | tupdesc, |
2107 | gettext_noop("could not convert row type" )); |
2108 | |
2109 | /* |
2110 | * Partition-specific slot's tupdesc can't be changed, |
2111 | * so allocate a new one. |
2112 | */ |
2113 | if (map != NULL) |
2114 | slot = execute_attr_map_slot(map, slot, |
2115 | MakeTupleTableSlot(tupdesc, &TTSOpsVirtual)); |
2116 | } |
2117 | |
2118 | insertedCols = GetInsertedColumns(resultRelInfo, estate); |
2119 | updatedCols = GetUpdatedColumns(resultRelInfo, estate); |
2120 | modifiedCols = bms_union(insertedCols, updatedCols); |
2121 | val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), |
2122 | slot, |
2123 | tupdesc, |
2124 | modifiedCols, |
2125 | 64); |
2126 | |
2127 | ereport(ERROR, |
2128 | (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION), |
2129 | errmsg("new row violates check option for view \"%s\"" , |
2130 | wco->relname), |
2131 | val_desc ? errdetail("Failing row contains %s." , |
2132 | val_desc) : 0)); |
2133 | break; |
2134 | case WCO_RLS_INSERT_CHECK: |
2135 | case WCO_RLS_UPDATE_CHECK: |
2136 | if (wco->polname != NULL) |
2137 | ereport(ERROR, |
2138 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
2139 | errmsg("new row violates row-level security policy \"%s\" for table \"%s\"" , |
2140 | wco->polname, wco->relname))); |
2141 | else |
2142 | ereport(ERROR, |
2143 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
2144 | errmsg("new row violates row-level security policy for table \"%s\"" , |
2145 | wco->relname))); |
2146 | break; |
2147 | case WCO_RLS_CONFLICT_CHECK: |
2148 | if (wco->polname != NULL) |
2149 | ereport(ERROR, |
2150 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
2151 | errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"" , |
2152 | wco->polname, wco->relname))); |
2153 | else |
2154 | ereport(ERROR, |
2155 | (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), |
2156 | errmsg("new row violates row-level security policy (USING expression) for table \"%s\"" , |
2157 | wco->relname))); |
2158 | break; |
2159 | default: |
2160 | elog(ERROR, "unrecognized WCO kind: %u" , wco->kind); |
2161 | break; |
2162 | } |
2163 | } |
2164 | } |
2165 | } |
2166 | |
2167 | /* |
2168 | * ExecBuildSlotValueDescription -- construct a string representing a tuple |
2169 | * |
2170 | * This is intentionally very similar to BuildIndexValueDescription, but |
2171 | * unlike that function, we truncate long field values (to at most maxfieldlen |
2172 | * bytes). That seems necessary here since heap field values could be very |
2173 | * long, whereas index entries typically aren't so wide. |
2174 | * |
2175 | * Also, unlike the case with index entries, we need to be prepared to ignore |
2176 | * dropped columns. We used to use the slot's tuple descriptor to decode the |
2177 | * data, but the slot's descriptor doesn't identify dropped columns, so we |
2178 | * now need to be passed the relation's descriptor. |
2179 | * |
2180 | * Note that, like BuildIndexValueDescription, if the user does not have |
2181 | * permission to view any of the columns involved, a NULL is returned. Unlike |
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * columns involved, that subset will be returned with a key identifying which
 * columns they are.
2185 | */ |
2186 | static char * |
2187 | ExecBuildSlotValueDescription(Oid reloid, |
2188 | TupleTableSlot *slot, |
2189 | TupleDesc tupdesc, |
2190 | Bitmapset *modifiedCols, |
2191 | int maxfieldlen) |
2192 | { |
2193 | StringInfoData buf; |
2194 | StringInfoData collist; |
2195 | bool write_comma = false; |
2196 | bool write_comma_collist = false; |
2197 | int i; |
2198 | AclResult aclresult; |
2199 | bool table_perm = false; |
2200 | bool any_perm = false; |
2201 | |
2202 | /* |
2203 | * Check if RLS is enabled and should be active for the relation; if so, |
2204 | * then don't return anything. Otherwise, go through normal permission |
2205 | * checks. |
2206 | */ |
2207 | if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED) |
2208 | return NULL; |
2209 | |
2210 | initStringInfo(&buf); |
2211 | |
2212 | appendStringInfoChar(&buf, '('); |
2213 | |
2214 | /* |
2215 | * Check if the user has permissions to see the row. Table-level SELECT |
2216 | * allows access to all columns. If the user does not have table-level |
2217 | * SELECT then we check each column and include those the user has SELECT |
2218 | * rights on. Additionally, we always include columns the user provided |
2219 | * data for. |
2220 | */ |
2221 | aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT); |
2222 | if (aclresult != ACLCHECK_OK) |
2223 | { |
2224 | /* Set up the buffer for the column list */ |
2225 | initStringInfo(&collist); |
2226 | appendStringInfoChar(&collist, '('); |
2227 | } |
2228 | else |
2229 | table_perm = any_perm = true; |
2230 | |
2231 | /* Make sure the tuple is fully deconstructed */ |
2232 | slot_getallattrs(slot); |
2233 | |
2234 | for (i = 0; i < tupdesc->natts; i++) |
2235 | { |
2236 | bool column_perm = false; |
2237 | char *val; |
2238 | int vallen; |
2239 | Form_pg_attribute att = TupleDescAttr(tupdesc, i); |
2240 | |
2241 | /* ignore dropped columns */ |
2242 | if (att->attisdropped) |
2243 | continue; |
2244 | |
2245 | if (!table_perm) |
2246 | { |
2247 | /* |
2248 | * No table-level SELECT, so need to make sure they either have |
2249 | * SELECT rights on the column or that they have provided the data |
2250 | * for the column. If not, omit this column from the error |
2251 | * message. |
2252 | */ |
2253 | aclresult = pg_attribute_aclcheck(reloid, att->attnum, |
2254 | GetUserId(), ACL_SELECT); |
2255 | if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, |
2256 | modifiedCols) || aclresult == ACLCHECK_OK) |
2257 | { |
2258 | column_perm = any_perm = true; |
2259 | |
2260 | if (write_comma_collist) |
2261 | appendStringInfoString(&collist, ", " ); |
2262 | else |
2263 | write_comma_collist = true; |
2264 | |
2265 | appendStringInfoString(&collist, NameStr(att->attname)); |
2266 | } |
2267 | } |
2268 | |
2269 | if (table_perm || column_perm) |
2270 | { |
2271 | if (slot->tts_isnull[i]) |
2272 | val = "null" ; |
2273 | else |
2274 | { |
2275 | Oid foutoid; |
2276 | bool typisvarlena; |
2277 | |
2278 | getTypeOutputInfo(att->atttypid, |
2279 | &foutoid, &typisvarlena); |
2280 | val = OidOutputFunctionCall(foutoid, slot->tts_values[i]); |
2281 | } |
2282 | |
2283 | if (write_comma) |
2284 | appendStringInfoString(&buf, ", " ); |
2285 | else |
2286 | write_comma = true; |
2287 | |
2288 | /* truncate if needed */ |
2289 | vallen = strlen(val); |
2290 | if (vallen <= maxfieldlen) |
2291 | appendStringInfoString(&buf, val); |
2292 | else |
2293 | { |
2294 | vallen = pg_mbcliplen(val, vallen, maxfieldlen); |
2295 | appendBinaryStringInfo(&buf, val, vallen); |
2296 | appendStringInfoString(&buf, "..." ); |
2297 | } |
2298 | } |
2299 | } |
2300 | |
2301 | /* If we end up with zero columns being returned, then return NULL. */ |
2302 | if (!any_perm) |
2303 | return NULL; |
2304 | |
2305 | appendStringInfoChar(&buf, ')'); |
2306 | |
2307 | if (!table_perm) |
2308 | { |
2309 | appendStringInfoString(&collist, ") = " ); |
2310 | appendStringInfoString(&collist, buf.data); |
2311 | |
2312 | return collist.data; |
2313 | } |
2314 | |
2315 | return buf.data; |
2316 | } |
2317 | |
2318 | |
2319 | /* |
2320 | * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a |
2321 | * given ResultRelInfo |
2322 | */ |
2323 | LockTupleMode |
2324 | ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo) |
2325 | { |
2326 | Bitmapset *keyCols; |
2327 | Bitmapset *updatedCols; |
2328 | |
2329 | /* |
2330 | * Compute lock mode to use. If columns that are part of the key have not |
2331 | * been modified, then we can use a weaker lock, allowing for better |
2332 | * concurrency. |
2333 | */ |
2334 | updatedCols = GetAllUpdatedColumns(relinfo, estate); |
2335 | keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc, |
2336 | INDEX_ATTR_BITMAP_KEY); |
2337 | |
2338 | if (bms_overlap(keyCols, updatedCols)) |
2339 | return LockTupleExclusive; |
2340 | |
2341 | return LockTupleNoKeyExclusive; |
2342 | } |
2343 | |
2344 | /* |
2345 | * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index |
2346 | * |
 * If there is no such struct, either return NULL or throw an error, depending
 * on missing_ok.
2348 | */ |
2349 | ExecRowMark * |
2350 | ExecFindRowMark(EState *estate, Index rti, bool missing_ok) |
2351 | { |
2352 | if (rti > 0 && rti <= estate->es_range_table_size && |
2353 | estate->es_rowmarks != NULL) |
2354 | { |
2355 | ExecRowMark *erm = estate->es_rowmarks[rti - 1]; |
2356 | |
2357 | if (erm) |
2358 | return erm; |
2359 | } |
2360 | if (!missing_ok) |
2361 | elog(ERROR, "failed to find ExecRowMark for rangetable index %u" , rti); |
2362 | return NULL; |
2363 | } |
2364 | |
2365 | /* |
2366 | * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct |
2367 | * |
2368 | * Inputs are the underlying ExecRowMark struct and the targetlist of the |
2369 | * input plan node (not planstate node!). We need the latter to find out |
2370 | * the column numbers of the resjunk columns. |
2371 | */ |
2372 | ExecAuxRowMark * |
2373 | ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist) |
2374 | { |
2375 | ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark)); |
2376 | char resname[32]; |
2377 | |
2378 | aerm->rowmark = erm; |
2379 | |
2380 | /* Look up the resjunk columns associated with this rowmark */ |
2381 | if (erm->markType != ROW_MARK_COPY) |
2382 | { |
2383 | /* need ctid for all methods other than COPY */ |
2384 | snprintf(resname, sizeof(resname), "ctid%u" , erm->rowmarkId); |
2385 | aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist, |
2386 | resname); |
2387 | if (!AttributeNumberIsValid(aerm->ctidAttNo)) |
2388 | elog(ERROR, "could not find junk %s column" , resname); |
2389 | } |
2390 | else |
2391 | { |
2392 | /* need wholerow if COPY */ |
2393 | snprintf(resname, sizeof(resname), "wholerow%u" , erm->rowmarkId); |
2394 | aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist, |
2395 | resname); |
2396 | if (!AttributeNumberIsValid(aerm->wholeAttNo)) |
2397 | elog(ERROR, "could not find junk %s column" , resname); |
2398 | } |
2399 | |
2400 | /* if child rel, need tableoid */ |
2401 | if (erm->rti != erm->prti) |
2402 | { |
2403 | snprintf(resname, sizeof(resname), "tableoid%u" , erm->rowmarkId); |
2404 | aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist, |
2405 | resname); |
2406 | if (!AttributeNumberIsValid(aerm->toidAttNo)) |
2407 | elog(ERROR, "could not find junk %s column" , resname); |
2408 | } |
2409 | |
2410 | return aerm; |
2411 | } |
2412 | |
2413 | |
2414 | /* |
2415 | * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to |
2416 | * process the updated version under READ COMMITTED rules. |
2417 | * |
2418 | * See backend/executor/README for some info about how this works. |
2419 | */ |
2420 | |
2421 | |
2422 | /* |
2423 | * Check the updated version of a tuple to see if we want to process it under |
2424 | * READ COMMITTED rules. |
2425 | * |
2426 | * epqstate - state for EvalPlanQual rechecking |
2427 | * relation - table containing tuple |
2428 | * rti - rangetable index of table containing tuple |
2429 | * inputslot - tuple for processing - this can be the slot from |
 *	EvalPlanQualSlot(), for increased efficiency.
2431 | * |
2432 | * This tests whether the tuple in inputslot still matches the relevant |
2433 | * quals. For that result to be useful, typically the input tuple has to be |
 * the last row version (otherwise the result isn't particularly useful) and
2435 | * locked (otherwise the result might be out of date). That's typically |
2436 | * achieved by using table_tuple_lock() with the |
2437 | * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag. |
2438 | * |
2439 | * Returns a slot containing the new candidate update/delete tuple, or |
2440 | * NULL if we determine we shouldn't process the row. |
2441 | */ |
2442 | TupleTableSlot * |
2443 | EvalPlanQual(EPQState *epqstate, Relation relation, |
2444 | Index rti, TupleTableSlot *inputslot) |
2445 | { |
2446 | TupleTableSlot *slot; |
2447 | TupleTableSlot *testslot; |
2448 | |
2449 | Assert(rti > 0); |
2450 | |
2451 | /* |
2452 | * Need to run a recheck subquery. Initialize or reinitialize EPQ state. |
2453 | */ |
2454 | EvalPlanQualBegin(epqstate); |
2455 | |
2456 | /* |
2457 | * Callers will often use the EvalPlanQualSlot to store the tuple to avoid |
2458 | * an unnecessary copy. |
2459 | */ |
2460 | testslot = EvalPlanQualSlot(epqstate, relation, rti); |
2461 | if (testslot != inputslot) |
2462 | ExecCopySlot(testslot, inputslot); |
2463 | |
2464 | /* |
2465 | * Run the EPQ query. We assume it will return at most one tuple. |
2466 | */ |
2467 | slot = EvalPlanQualNext(epqstate); |
2468 | |
2469 | /* |
2470 | * If we got a tuple, force the slot to materialize the tuple so that it |
2471 | * is not dependent on any local state in the EPQ query (in particular, |
2472 | * it's highly likely that the slot contains references to any pass-by-ref |
2473 | * datums that may be present in copyTuple). As with the next step, this |
2474 | * is to guard against early re-use of the EPQ query. |
2475 | */ |
2476 | if (!TupIsNull(slot)) |
2477 | ExecMaterializeSlot(slot); |
2478 | |
2479 | /* |
2480 | * Clear out the test tuple. This is needed in case the EPQ query is |
2481 | * re-used to test a tuple for a different relation. (Not clear that can |
2482 | * really happen, but let's be safe.) |
2483 | */ |
2484 | ExecClearTuple(testslot); |
2485 | |
2486 | return slot; |
2487 | } |
2488 | |
2489 | /* |
2490 | * EvalPlanQualInit -- initialize during creation of a plan state node |
2491 | * that might need to invoke EPQ processing. |
2492 | * |
2493 | * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later |
2494 | * with EvalPlanQualSetPlan. |
2495 | */ |
2496 | void |
2497 | EvalPlanQualInit(EPQState *epqstate, EState *parentestate, |
2498 | Plan *subplan, List *auxrowmarks, int epqParam) |
2499 | { |
2500 | Index rtsize = parentestate->es_range_table_size; |
2501 | |
2502 | /* initialize data not changing over EPQState's lifetime */ |
2503 | epqstate->parentestate = parentestate; |
2504 | epqstate->epqParam = epqParam; |
2505 | |
2506 | /* |
2507 | * Allocate space to reference a slot for each potential rti - do so now |
2508 | * rather than in EvalPlanQualBegin(), as done for other dynamically |
2509 | * allocated resources, so EvalPlanQualSlot() can be used to hold tuples |
2510 | * that *may* need EPQ later, without forcing the overhead of |
2511 | * EvalPlanQualBegin(). |
2512 | */ |
2513 | epqstate->tuple_table = NIL; |
2514 | epqstate->relsubs_slot = (TupleTableSlot **) |
2515 | palloc0(rtsize * sizeof(TupleTableSlot *)); |
2516 | |
2517 | /* ... and remember data that EvalPlanQualBegin will need */ |
2518 | epqstate->plan = subplan; |
2519 | epqstate->arowMarks = auxrowmarks; |
2520 | |
2521 | /* ... and mark the EPQ state inactive */ |
2522 | epqstate->origslot = NULL; |
2523 | epqstate->recheckestate = NULL; |
2524 | epqstate->recheckplanstate = NULL; |
2525 | epqstate->relsubs_rowmark = NULL; |
2526 | epqstate->relsubs_done = NULL; |
2527 | } |
2528 | |
2529 | /* |
2530 | * EvalPlanQualSetPlan -- set or change subplan of an EPQState. |
2531 | * |
2532 | * We need this so that ModifyTable can deal with multiple subplans. |
2533 | */ |
2534 | void |
2535 | EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks) |
2536 | { |
2537 | /* If we have a live EPQ query, shut it down */ |
2538 | EvalPlanQualEnd(epqstate); |
2539 | /* And set/change the plan pointer */ |
2540 | epqstate->plan = subplan; |
2541 | /* The rowmarks depend on the plan, too */ |
2542 | epqstate->arowMarks = auxrowmarks; |
2543 | } |
2544 | |
2545 | /* |
2546 | * Return, and create if necessary, a slot for an EPQ test tuple. |
2547 | * |
2548 | * Note this only requires EvalPlanQualInit() to have been called, |
2549 | * EvalPlanQualBegin() is not necessary. |
2550 | */ |
2551 | TupleTableSlot * |
2552 | EvalPlanQualSlot(EPQState *epqstate, |
2553 | Relation relation, Index rti) |
2554 | { |
2555 | TupleTableSlot **slot; |
2556 | |
2557 | Assert(relation); |
2558 | Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size); |
2559 | slot = &epqstate->relsubs_slot[rti - 1]; |
2560 | |
2561 | if (*slot == NULL) |
2562 | { |
2563 | MemoryContext oldcontext; |
2564 | |
2565 | oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt); |
2566 | *slot = table_slot_create(relation, &epqstate->tuple_table); |
2567 | MemoryContextSwitchTo(oldcontext); |
2568 | } |
2569 | |
2570 | return *slot; |
2571 | } |
2572 | |
2573 | /* |
2574 | * Fetch the current row value for a non-locked relation, identified by rti, |
2575 | * that needs to be scanned by an EvalPlanQual operation. origslot must have |
2576 | * been set to contain the current result row (top-level row) that we need to |
2577 | * recheck. Returns true if a substitution tuple was found, false if not. |
2578 | */ |
2579 | bool |
2580 | EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot) |
2581 | { |
2582 | ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1]; |
2583 | ExecRowMark *erm = earm->rowmark; |
2584 | Datum datum; |
2585 | bool isNull; |
2586 | |
2587 | Assert(earm != NULL); |
2588 | Assert(epqstate->origslot != NULL); |
2589 | |
2590 | if (RowMarkRequiresRowShareLock(erm->markType)) |
2591 | elog(ERROR, "EvalPlanQual doesn't support locking rowmarks" ); |
2592 | |
2593 | /* if child rel, must check whether it produced this row */ |
2594 | if (erm->rti != erm->prti) |
2595 | { |
2596 | Oid tableoid; |
2597 | |
2598 | datum = ExecGetJunkAttribute(epqstate->origslot, |
2599 | earm->toidAttNo, |
2600 | &isNull); |
2601 | /* non-locked rels could be on the inside of outer joins */ |
2602 | if (isNull) |
2603 | return false; |
2604 | |
2605 | tableoid = DatumGetObjectId(datum); |
2606 | |
2607 | Assert(OidIsValid(erm->relid)); |
2608 | if (tableoid != erm->relid) |
2609 | { |
2610 | /* this child is inactive right now */ |
2611 | return false; |
2612 | } |
2613 | } |
2614 | |
2615 | if (erm->markType == ROW_MARK_REFERENCE) |
2616 | { |
2617 | Assert(erm->relation != NULL); |
2618 | |
2619 | /* fetch the tuple's ctid */ |
2620 | datum = ExecGetJunkAttribute(epqstate->origslot, |
2621 | earm->ctidAttNo, |
2622 | &isNull); |
2623 | /* non-locked rels could be on the inside of outer joins */ |
2624 | if (isNull) |
2625 | return false; |
2626 | |
2627 | /* fetch requests on foreign tables must be passed to their FDW */ |
2628 | if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE) |
2629 | { |
2630 | FdwRoutine *fdwroutine; |
2631 | bool updated = false; |
2632 | |
2633 | fdwroutine = GetFdwRoutineForRelation(erm->relation, false); |
2634 | /* this should have been checked already, but let's be safe */ |
2635 | if (fdwroutine->RefetchForeignRow == NULL) |
2636 | ereport(ERROR, |
2637 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
2638 | errmsg("cannot lock rows in foreign table \"%s\"" , |
2639 | RelationGetRelationName(erm->relation)))); |
2640 | |
2641 | fdwroutine->RefetchForeignRow(epqstate->recheckestate, |
2642 | erm, |
2643 | datum, |
2644 | slot, |
2645 | &updated); |
2646 | if (TupIsNull(slot)) |
2647 | elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck" ); |
2648 | |
2649 | /* |
2650 | * Ideally we'd insist on updated == false here, but that assumes |
2651 | * that FDWs can track that exactly, which they might not be able |
2652 | * to. So just ignore the flag. |
2653 | */ |
2654 | return true; |
2655 | } |
2656 | else |
2657 | { |
2658 | /* ordinary table, fetch the tuple */ |
2659 | if (!table_tuple_fetch_row_version(erm->relation, |
2660 | (ItemPointer) DatumGetPointer(datum), |
2661 | SnapshotAny, slot)) |
2662 | elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck" ); |
2663 | return true; |
2664 | } |
2665 | } |
2666 | else |
2667 | { |
2668 | Assert(erm->markType == ROW_MARK_COPY); |
2669 | |
2670 | /* fetch the whole-row Var for the relation */ |
2671 | datum = ExecGetJunkAttribute(epqstate->origslot, |
2672 | earm->wholeAttNo, |
2673 | &isNull); |
2674 | /* non-locked rels could be on the inside of outer joins */ |
2675 | if (isNull) |
2676 | return false; |
2677 | |
2678 | ExecStoreHeapTupleDatum(datum, slot); |
2679 | return true; |
2680 | } |
2681 | } |
2682 | |
2683 | /* |
2684 | * Fetch the next row (if any) from EvalPlanQual testing |
2685 | * |
2686 | * (In practice, there should never be more than one row...) |
2687 | */ |
2688 | TupleTableSlot * |
2689 | EvalPlanQualNext(EPQState *epqstate) |
2690 | { |
2691 | MemoryContext oldcontext; |
2692 | TupleTableSlot *slot; |
2693 | |
2694 | oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt); |
2695 | slot = ExecProcNode(epqstate->recheckplanstate); |
2696 | MemoryContextSwitchTo(oldcontext); |
2697 | |
2698 | return slot; |
2699 | } |
2700 | |
2701 | /* |
2702 | * Initialize or reset an EvalPlanQual state tree |
2703 | */ |
2704 | void |
2705 | EvalPlanQualBegin(EPQState *epqstate) |
2706 | { |
2707 | EState *parentestate = epqstate->parentestate; |
2708 | EState *recheckestate = epqstate->recheckestate; |
2709 | |
2710 | if (recheckestate == NULL) |
2711 | { |
2712 | /* First time through, so create a child EState */ |
2713 | EvalPlanQualStart(epqstate, epqstate->plan); |
2714 | } |
2715 | else |
2716 | { |
2717 | /* |
2718 | * We already have a suitable child EPQ tree, so just reset it. |
2719 | */ |
2720 | Index rtsize = parentestate->es_range_table_size; |
2721 | PlanState *rcplanstate = epqstate->recheckplanstate; |
2722 | |
2723 | MemSet(epqstate->relsubs_done, 0, rtsize * sizeof(bool)); |
2724 | |
2725 | /* Recopy current values of parent parameters */ |
2726 | if (parentestate->es_plannedstmt->paramExecTypes != NIL) |
2727 | { |
2728 | int i; |
2729 | |
2730 | /* |
2731 | * Force evaluation of any InitPlan outputs that could be needed |
2732 | * by the subplan, just in case they got reset since |
2733 | * EvalPlanQualStart (see comments therein). |
2734 | */ |
2735 | ExecSetParamPlanMulti(rcplanstate->plan->extParam, |
2736 | GetPerTupleExprContext(parentestate)); |
2737 | |
2738 | i = list_length(parentestate->es_plannedstmt->paramExecTypes); |
2739 | |
2740 | while (--i >= 0) |
2741 | { |
2742 | /* copy value if any, but not execPlan link */ |
2743 | recheckestate->es_param_exec_vals[i].value = |
2744 | parentestate->es_param_exec_vals[i].value; |
2745 | recheckestate->es_param_exec_vals[i].isnull = |
2746 | parentestate->es_param_exec_vals[i].isnull; |
2747 | } |
2748 | } |
2749 | |
2750 | /* |
2751 | * Mark child plan tree as needing rescan at all scan nodes. The |
2752 | * first ExecProcNode will take care of actually doing the rescan. |
2753 | */ |
2754 | rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam, |
2755 | epqstate->epqParam); |
2756 | } |
2757 | } |
2758 | |
2759 | /* |
2760 | * Start execution of an EvalPlanQual plan tree. |
2761 | * |
2762 | * This is a cut-down version of ExecutorStart(): we copy some state from |
2763 | * the top-level estate rather than initializing it fresh. |
2764 | */ |
2765 | static void |
2766 | EvalPlanQualStart(EPQState *epqstate, Plan *planTree) |
2767 | { |
2768 | EState *parentestate = epqstate->parentestate; |
2769 | Index rtsize = parentestate->es_range_table_size; |
2770 | EState *rcestate; |
2771 | MemoryContext oldcontext; |
2772 | ListCell *l; |
2773 | |
2774 | epqstate->recheckestate = rcestate = CreateExecutorState(); |
2775 | |
2776 | oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt); |
2777 | |
2778 | /* signal that this is an EState for executing EPQ */ |
2779 | rcestate->es_epq_active = epqstate; |
2780 | |
2781 | /* |
2782 | * Child EPQ EStates share the parent's copy of unchanging state such as |
2783 | * the snapshot, rangetable, result-rel info, and external Param info. |
2784 | * They need their own copies of local state, including a tuple table, |
2785 | * es_param_exec_vals, etc. |
2786 | * |
2787 | * The ResultRelInfo array management is trickier than it looks. We |
2788 | * create fresh arrays for the child but copy all the content from the |
2789 | * parent. This is because it's okay for the child to share any |
2790 | * per-relation state the parent has already created --- but if the child |
2791 | * sets up any ResultRelInfo fields, such as its own junkfilter, that |
2792 | * state must *not* propagate back to the parent. (For one thing, the |
2793 | * pointed-to data is in a memory context that won't last long enough.) |
2794 | */ |
2795 | rcestate->es_direction = ForwardScanDirection; |
2796 | rcestate->es_snapshot = parentestate->es_snapshot; |
2797 | rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot; |
2798 | rcestate->es_range_table = parentestate->es_range_table; |
2799 | rcestate->es_range_table_array = parentestate->es_range_table_array; |
2800 | rcestate->es_range_table_size = parentestate->es_range_table_size; |
2801 | rcestate->es_relations = parentestate->es_relations; |
2802 | rcestate->es_queryEnv = parentestate->es_queryEnv; |
2803 | rcestate->es_rowmarks = parentestate->es_rowmarks; |
2804 | rcestate->es_plannedstmt = parentestate->es_plannedstmt; |
2805 | rcestate->es_junkFilter = parentestate->es_junkFilter; |
2806 | rcestate->es_output_cid = parentestate->es_output_cid; |
2807 | if (parentestate->es_num_result_relations > 0) |
2808 | { |
2809 | int numResultRelations = parentestate->es_num_result_relations; |
2810 | int numRootResultRels = parentestate->es_num_root_result_relations; |
2811 | ResultRelInfo *resultRelInfos; |
2812 | |
2813 | resultRelInfos = (ResultRelInfo *) |
2814 | palloc(numResultRelations * sizeof(ResultRelInfo)); |
2815 | memcpy(resultRelInfos, parentestate->es_result_relations, |
2816 | numResultRelations * sizeof(ResultRelInfo)); |
2817 | rcestate->es_result_relations = resultRelInfos; |
2818 | rcestate->es_num_result_relations = numResultRelations; |
2819 | |
2820 | /* Also transfer partitioned root result relations. */ |
2821 | if (numRootResultRels > 0) |
2822 | { |
2823 | resultRelInfos = (ResultRelInfo *) |
2824 | palloc(numRootResultRels * sizeof(ResultRelInfo)); |
2825 | memcpy(resultRelInfos, parentestate->es_root_result_relations, |
2826 | numRootResultRels * sizeof(ResultRelInfo)); |
2827 | rcestate->es_root_result_relations = resultRelInfos; |
2828 | rcestate->es_num_root_result_relations = numRootResultRels; |
2829 | } |
2830 | } |
2831 | /* es_result_relation_info must NOT be copied */ |
2832 | /* es_trig_target_relations must NOT be copied */ |
2833 | rcestate->es_top_eflags = parentestate->es_top_eflags; |
2834 | rcestate->es_instrument = parentestate->es_instrument; |
2835 | /* es_auxmodifytables must NOT be copied */ |
2836 | |
2837 | /* |
2838 | * The external param list is simply shared from parent. The internal |
2839 | * param workspace has to be local state, but we copy the initial values |
2840 | * from the parent, so as to have access to any param values that were |
2841 | * already set from other parts of the parent's plan tree. |
2842 | */ |
2843 | rcestate->es_param_list_info = parentestate->es_param_list_info; |
2844 | if (parentestate->es_plannedstmt->paramExecTypes != NIL) |
2845 | { |
2846 | int i; |
2847 | |
2848 | /* |
2849 | * Force evaluation of any InitPlan outputs that could be needed by |
2850 | * the subplan. (With more complexity, maybe we could postpone this |
2851 | * till the subplan actually demands them, but it doesn't seem worth |
2852 | * the trouble; this is a corner case already, since usually the |
2853 | * InitPlans would have been evaluated before reaching EvalPlanQual.) |
2854 | * |
2855 | * This will not touch output params of InitPlans that occur somewhere |
2856 | * within the subplan tree, only those that are attached to the |
2857 | * ModifyTable node or above it and are referenced within the subplan. |
2858 | * That's OK though, because the planner would only attach such |
2859 | * InitPlans to a lower-level SubqueryScan node, and EPQ execution |
2860 | * will not descend into a SubqueryScan. |
2861 | * |
2862 | * The EState's per-output-tuple econtext is sufficiently short-lived |
2863 | * for this, since it should get reset before there is any chance of |
2864 | * doing EvalPlanQual again. |
2865 | */ |
2866 | ExecSetParamPlanMulti(planTree->extParam, |
2867 | GetPerTupleExprContext(parentestate)); |
2868 | |
2869 | /* now make the internal param workspace ... */ |
2870 | i = list_length(parentestate->es_plannedstmt->paramExecTypes); |
2871 | rcestate->es_param_exec_vals = (ParamExecData *) |
2872 | palloc0(i * sizeof(ParamExecData)); |
2873 | /* ... and copy down all values, whether really needed or not */ |
2874 | while (--i >= 0) |
2875 | { |
2876 | /* copy value if any, but not execPlan link */ |
2877 | rcestate->es_param_exec_vals[i].value = |
2878 | parentestate->es_param_exec_vals[i].value; |
2879 | rcestate->es_param_exec_vals[i].isnull = |
2880 | parentestate->es_param_exec_vals[i].isnull; |
2881 | } |
2882 | } |
2883 | |
2884 | /* |
2885 | * Initialize private state information for each SubPlan. We must do this |
2886 | * before running ExecInitNode on the main query tree, since |
2887 | * ExecInitSubPlan expects to be able to find these entries. Some of the |
2888 | * SubPlans might not be used in the part of the plan tree we intend to |
2889 | * run, but since it's not easy to tell which, we just initialize them |
2890 | * all. |
2891 | */ |
2892 | Assert(rcestate->es_subplanstates == NIL); |
2893 | foreach(l, parentestate->es_plannedstmt->subplans) |
2894 | { |
2895 | Plan *subplan = (Plan *) lfirst(l); |
2896 | PlanState *subplanstate; |
2897 | |
2898 | subplanstate = ExecInitNode(subplan, rcestate, 0); |
2899 | rcestate->es_subplanstates = lappend(rcestate->es_subplanstates, |
2900 | subplanstate); |
2901 | } |
2902 | |
2903 | /* |
2904 | * These arrays are reused across different plans set with |
2905 | * EvalPlanQualSetPlan(), which is safe because they all use the same |
	 * parent EState.  Therefore we can reuse them if already allocated.
2907 | */ |
2908 | if (epqstate->relsubs_rowmark == NULL) |
2909 | { |
2910 | Assert(epqstate->relsubs_done == NULL); |
2911 | epqstate->relsubs_rowmark = (ExecAuxRowMark **) |
2912 | palloc0(rtsize * sizeof(ExecAuxRowMark *)); |
2913 | epqstate->relsubs_done = (bool *) |
2914 | palloc0(rtsize * sizeof(bool)); |
2915 | } |
2916 | else |
2917 | { |
2918 | Assert(epqstate->relsubs_done != NULL); |
2919 | memset(epqstate->relsubs_rowmark, 0, |
2920 | rtsize * sizeof(ExecAuxRowMark *)); |
2921 | memset(epqstate->relsubs_done, 0, |
2922 | rtsize * sizeof(bool)); |
2923 | } |
2924 | |
2925 | /* |
	 * Build an RTI-indexed array of rowmarks, so that
	 * EvalPlanQualFetchRowMark() can efficiently access the rowmark to be
	 * fetched.
2929 | */ |
2930 | foreach(l, epqstate->arowMarks) |
2931 | { |
2932 | ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l); |
2933 | |
2934 | epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm; |
2935 | } |
2936 | |
2937 | /* |
2938 | * Initialize the private state information for all the nodes in the part |
2939 | * of the plan tree we need to run. This opens files, allocates storage |
2940 | * and leaves us ready to start processing tuples. |
2941 | */ |
2942 | epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0); |
2943 | |
2944 | MemoryContextSwitchTo(oldcontext); |
2945 | } |
2946 | |
2947 | /* |
2948 | * EvalPlanQualEnd -- shut down at termination of parent plan state node, |
2949 | * or if we are done with the current EPQ child. |
2950 | * |
2951 | * This is a cut-down version of ExecutorEnd(); basically we want to do most |
2952 | * of the normal cleanup, but *not* close result relations (which we are |
2953 | * just sharing from the outer query). We do, however, have to close any |
2954 | * trigger target relations that got opened, since those are not shared. |
2955 | * (There probably shouldn't be any of the latter, but just in case...) |
2956 | */ |
2957 | void |
2958 | EvalPlanQualEnd(EPQState *epqstate) |
2959 | { |
2960 | EState *estate = epqstate->recheckestate; |
2961 | Index rtsize; |
2962 | MemoryContext oldcontext; |
2963 | ListCell *l; |
2964 | |
2965 | rtsize = epqstate->parentestate->es_range_table_size; |
2966 | |
2967 | /* |
2968 | * We may have a tuple table, even if EPQ wasn't started, because we allow |
2969 | * use of EvalPlanQualSlot() without calling EvalPlanQualBegin(). |
2970 | */ |
2971 | if (epqstate->tuple_table != NIL) |
2972 | { |
2973 | memset(epqstate->relsubs_slot, 0, |
2974 | rtsize * sizeof(TupleTableSlot *)); |
2975 | ExecResetTupleTable(epqstate->tuple_table, true); |
2976 | epqstate->tuple_table = NIL; |
2977 | } |
2978 | |
2979 | /* EPQ wasn't started, nothing further to do */ |
2980 | if (estate == NULL) |
2981 | return; |
2982 | |
2983 | oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); |
2984 | |
2985 | ExecEndNode(epqstate->recheckplanstate); |
2986 | |
2987 | foreach(l, estate->es_subplanstates) |
2988 | { |
2989 | PlanState *subplanstate = (PlanState *) lfirst(l); |
2990 | |
2991 | ExecEndNode(subplanstate); |
2992 | } |
2993 | |
2994 | /* throw away the per-estate tuple table, some node may have used it */ |
2995 | ExecResetTupleTable(estate->es_tupleTable, false); |
2996 | |
2997 | /* close any trigger target relations attached to this EState */ |
2998 | ExecCleanUpTriggerState(estate); |
2999 | |
3000 | MemoryContextSwitchTo(oldcontext); |
3001 | |
3002 | FreeExecutorState(estate); |
3003 | |
3004 | /* Mark EPQState idle */ |
3005 | epqstate->recheckestate = NULL; |
3006 | epqstate->recheckplanstate = NULL; |
3007 | epqstate->origslot = NULL; |
3008 | } |
3009 | |