/*-------------------------------------------------------------------------
 *
 * plannodes.h
 *	  definitions for query plan nodes
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/nodes/plannodes.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef PLANNODES_H
#define PLANNODES_H

#include "access/sdir.h"
#include "access/stratnum.h"
#include "lib/stringinfo.h"
#include "nodes/bitmapset.h"
#include "nodes/lockoptions.h"
#include "nodes/primnodes.h"


/* ----------------------------------------------------------------
 *						node definitions
 * ----------------------------------------------------------------
 */

/* ----------------
 *		PlannedStmt node
 *
 * The output of the planner is a Plan tree headed by a PlannedStmt node.
 * PlannedStmt holds the "one time" information needed by the executor.
 *
 * For simplicity in APIs, we also wrap utility statements in PlannedStmt
 * nodes; in such cases, commandType == CMD_UTILITY, the statement itself
 * is in the utilityStmt field, and the rest of the struct is mostly dummy.
 * (We do use canSetTag, stmt_location, stmt_len, and possibly queryId.)
 * ----------------
 */
typedef struct PlannedStmt
{
	NodeTag		type;

	CmdType		commandType;	/* select|insert|update|delete|utility */

	uint64		queryId;		/* query identifier (copied from Query) */

	bool		hasReturning;	/* is it insert|update|delete RETURNING? */

	bool		hasModifyingCTE;	/* has insert|update|delete in WITH? */

	bool		canSetTag;		/* do I set the command result tag? */

	bool		transientPlan;	/* redo plan when TransactionXmin changes? */

	bool		dependsOnRole;	/* is plan specific to current role? */

	bool		parallelModeNeeded; /* parallel mode required to execute? */

	int			jitFlags;		/* which forms of JIT should be performed */

	struct Plan *planTree;		/* tree of Plan nodes */

	List	   *rtable;			/* list of RangeTblEntry nodes */

	/* rtable indexes of target relations for INSERT/UPDATE/DELETE */
	List	   *resultRelations;	/* integer list of RT indexes, or NIL */

	/*
	 * rtable indexes of partitioned table roots that are UPDATE/DELETE
	 * targets; needed for trigger firing.
	 */
	List	   *rootResultRelations;

	List	   *subplans;		/* Plan trees for SubPlan expressions; note
								 * that some could be NULL */

	Bitmapset  *rewindPlanIDs;	/* indices of subplans that require REWIND */

	List	   *rowMarks;		/* a list of PlanRowMark's */

	List	   *relationOids;	/* OIDs of relations the plan depends on */

	List	   *invalItems;		/* other dependencies, as PlanInvalItems */

	List	   *paramExecTypes; /* type OIDs for PARAM_EXEC Params */

	Node	   *utilityStmt;	/* non-null if this is utility stmt */

	/* statement location in source string (copied from Query) */
	int			stmt_location;	/* start location, or -1 if unknown */
	int			stmt_len;		/* length in bytes; 0 means "rest of string" */
} PlannedStmt;

/* macro for fetching the Plan associated with a SubPlan node */
#define exec_subplan_get_plan(plannedstmt, subplan) \
	((Plan *) list_nth((plannedstmt)->subplans, (subplan)->plan_id - 1))
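/*
 * Illustrative usage (an editorial sketch, not part of the original header;
 * "stmt" and "sp" are hypothetical variables of type PlannedStmt * and
 * SubPlan *):
 *
 *		Plan *plan = exec_subplan_get_plan(stmt, sp);
 *
 * SubPlan.plan_id is 1-based, which is why the macro subtracts one before
 * indexing into the subplans list.
 */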

/* ----------------
 *		Plan node
 *
 * All plan nodes "derive" from the Plan structure by having the
 * Plan structure as the first field.  This ensures that everything works
 * when nodes are cast to Plan's.  (node pointers are frequently cast to Plan*
 * when passed around generically in the executor)
 *
 * We never actually instantiate any Plan nodes; this is just the common
 * abstract superclass for all Plan-type nodes.
 * ----------------
 */
typedef struct Plan
{
	NodeTag		type;

	/*
	 * estimated execution costs for plan (see costsize.c for more info)
	 */
	Cost		startup_cost;	/* cost expended before fetching any tuples */
	Cost		total_cost;		/* total cost (assuming all tuples fetched) */

	/*
	 * planner's estimate of result size of this plan step
	 */
	double		plan_rows;		/* number of rows plan is expected to emit */
	int			plan_width;		/* average row width in bytes */

	/*
	 * information needed for parallel query
	 */
	bool		parallel_aware; /* engage parallel-aware logic? */
	bool		parallel_safe;	/* OK to use as part of parallel plan? */

	/*
	 * Common structural data for all Plan types.
	 */
	int			plan_node_id;	/* unique across entire final plan tree */
	List	   *targetlist;		/* target list to be computed at this node */
	List	   *qual;			/* implicitly-ANDed qual conditions */
	struct Plan *lefttree;		/* input plan tree(s) */
	struct Plan *righttree;
	List	   *initPlan;		/* Init Plan nodes (un-correlated expr
								 * subselects) */

	/*
	 * Information for management of parameter-change-driven rescanning
	 *
	 * extParam includes the paramIDs of all external PARAM_EXEC params
	 * affecting this plan node or its children.  setParam params from the
	 * node's initPlans are not included, but their extParams are.
	 *
	 * allParam includes all the extParam paramIDs, plus the IDs of local
	 * params that affect the node (i.e., the setParams of its initplans).
	 * These are _all_ the PARAM_EXEC params that affect this node.
	 */
	Bitmapset  *extParam;
	Bitmapset  *allParam;
} Plan;
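
/*
 * Worked example of extParam vs. allParam (an editorial sketch, not from the
 * original sources): suppose a node references PARAM_EXEC param 7 set at an
 * outer query level, and has an initplan whose setParam is param 3.  By the
 * rules above, extParam = {7} (the initplan's setParam is excluded, though
 * any extParams of the initplan itself would be merged in), while
 * allParam = {3, 7}, since it additionally includes the setParams of the
 * node's own initplans.
 */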

/* ----------------
 *	these are defined to avoid confusion problems with "left"
 *	and "right" and "inner" and "outer".  The convention is that
 *	the "left" plan is the "outer" plan and the "right" plan is
 *	the inner plan, but these make the code more readable.
 * ----------------
 */
#define innerPlan(node)			(((Plan *)(node))->righttree)
#define outerPlan(node)			(((Plan *)(node))->lefttree)
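
/*
 * A minimal traversal sketch using these macros (illustrative only, not part
 * of the original header; visit() is a hypothetical per-node callback):
 *
 *		static void
 *		walk_plan(Plan *node)
 *		{
 *			if (node == NULL)
 *				return;
 *			visit(node);
 *			walk_plan(outerPlan(node));
 *			walk_plan(innerPlan(node));
 *		}
 *
 * Real traversal code must also visit node-type-specific children, such as
 * Append's appendplans or SubqueryScan's subplan (see below).
 */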


/* ----------------
 *	 Result node -
 *		If no outer plan, evaluate a variable-free targetlist.
 *		If outer plan, return tuples from outer plan (after a level of
 *		projection as shown by targetlist).
 *
 * If resconstantqual isn't NULL, it represents a one-time qualification
 * test (i.e., one that doesn't depend on any variables from the outer plan,
 * so needs to be evaluated only once).
 * ----------------
 */
typedef struct Result
{
	Plan		plan;
	Node	   *resconstantqual;
} Result;
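
/*
 * Example (editorial note): a query such as "SELECT 2+2" is typically
 * planned as a lone Result node with no outer plan, evaluating its constant
 * targetlist once; adding "WHERE false" would supply a constant-false
 * resconstantqual that suppresses all output after a single test.
 */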

/* ----------------
 *	 ProjectSet node -
 *		Apply a projection that includes set-returning functions to the
 *		output tuples of the outer plan.
 * ----------------
 */
typedef struct ProjectSet
{
	Plan		plan;
} ProjectSet;

/* ----------------
 *	 ModifyTable node -
 *		Apply rows produced by subplan(s) to result table(s),
 *		by inserting, updating, or deleting.
 *
 * If the originally named target table is a partitioned table, both
 * nominalRelation and rootRelation contain the RT index of the partition
 * root, which is not otherwise mentioned in the plan.  Otherwise rootRelation
 * is zero.  However, nominalRelation will always be set, as it's the rel that
 * EXPLAIN should claim is the INSERT/UPDATE/DELETE target.
 *
 * Note that rowMarks and epqParam are presumed to be valid for all the
 * subplan(s); they can't contain any info that varies across subplans.
 * ----------------
 */
typedef struct ModifyTable
{
	Plan		plan;
	CmdType		operation;		/* INSERT, UPDATE, or DELETE */
	bool		canSetTag;		/* do we set the command tag/es_processed? */
	Index		nominalRelation;	/* Parent RT index for use of EXPLAIN */
	Index		rootRelation;	/* Root RT index, if target is partitioned */
	bool		partColsUpdated;	/* some part key in hierarchy updated */
	List	   *resultRelations;	/* integer list of RT indexes */
	int			resultRelIndex; /* index of first resultRel in plan's list */
	int			rootResultRelIndex; /* index of the partitioned table root */
	List	   *plans;			/* plan(s) producing source data */
	List	   *withCheckOptionLists;	/* per-target-table WCO lists */
	List	   *returningLists; /* per-target-table RETURNING tlists */
	List	   *fdwPrivLists;	/* per-target-table FDW private data lists */
	Bitmapset  *fdwDirectModifyPlans;	/* indices of FDW DM plans */
	List	   *rowMarks;		/* PlanRowMarks (non-locking only) */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
	OnConflictAction onConflictAction;	/* ON CONFLICT action */
	List	   *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs */
	List	   *onConflictSet;	/* SET for INSERT ON CONFLICT DO UPDATE */
	Node	   *onConflictWhere;	/* WHERE for ON CONFLICT UPDATE */
	Index		exclRelRTI;		/* RTI of the EXCLUDED pseudo relation */
	List	   *exclRelTlist;	/* tlist of the EXCLUDED pseudo relation */
} ModifyTable;

struct PartitionPruneInfo;		/* forward reference to struct below */

/* ----------------
 *	 Append node -
 *		Generate the concatenation of the results of sub-plans.
 * ----------------
 */
typedef struct Append
{
	Plan		plan;
	List	   *appendplans;

	/*
	 * All 'appendplans' preceding this index are non-partial plans. All
	 * 'appendplans' from this index onwards are partial plans.
	 */
	int			first_partial_plan;

	/* Info for run-time subplan pruning; NULL if we're not doing that */
	struct PartitionPruneInfo *part_prune_info;
} Append;
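
/*
 * Example (editorial note): if appendplans holds three subplans and
 * first_partial_plan == 2, subplans 0 and 1 are non-partial and subplan 2 is
 * partial; first_partial_plan == list_length(appendplans) would mean that no
 * subplans are partial.
 */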

/* ----------------
 *	 MergeAppend node -
 *		Merge the results of pre-sorted sub-plans to preserve the ordering.
 * ----------------
 */
typedef struct MergeAppend
{
	Plan		plan;
	List	   *mergeplans;
	/* these fields are just like the sort-key info in struct Sort: */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
	/* Info for run-time subplan pruning; NULL if we're not doing that */
	struct PartitionPruneInfo *part_prune_info;
} MergeAppend;

/* ----------------
 *	 RecursiveUnion node -
 *		Generate a recursive union of two subplans.
 *
 * The "outer" subplan is always the non-recursive term, and the "inner"
 * subplan is the recursive term.
 * ----------------
 */
typedef struct RecursiveUnion
{
	Plan		plan;
	int			wtParam;		/* ID of Param representing work table */
	/* Remaining fields are zero/null in UNION ALL case */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	Oid		   *dupCollations;
	long		numGroups;		/* estimated number of groups in input */
} RecursiveUnion;

/* ----------------
 *	 BitmapAnd node -
 *		Generate the intersection of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapAnd
{
	Plan		plan;
	List	   *bitmapplans;
} BitmapAnd;

/* ----------------
 *	 BitmapOr node -
 *		Generate the union of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapOr
{
	Plan		plan;
	bool		isshared;
	List	   *bitmapplans;
} BitmapOr;

/*
 * ==========
 * Scan nodes
 * ==========
 */
typedef struct Scan
{
	Plan		plan;
	Index		scanrelid;		/* relid is index into the range table */
} Scan;

/* ----------------
 *		sequential scan node
 * ----------------
 */
typedef Scan SeqScan;

/* ----------------
 *		table sample scan node
 * ----------------
 */
typedef struct SampleScan
{
	Scan		scan;
	/* use struct pointer to avoid including parsenodes.h here */
	struct TableSampleClause *tablesample;
} SampleScan;

/* ----------------
 *		index scan node
 *
 * indexqualorig is an implicitly-ANDed list of index qual expressions, each
 * in the same form it appeared in the query WHERE condition.  Each should
 * be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
 * The indexkey is a Var or expression referencing column(s) of the index's
 * base table.  The comparisonval might be any expression, but it won't use
 * any columns of the base table.  The expressions are ordered by index
 * column position (but items referencing the same index column can appear
 * in any order).  indexqualorig is used at runtime only if we have to recheck
 * a lossy indexqual.
 *
 * indexqual has the same form, but the expressions have been commuted if
 * necessary to put the indexkeys on the left, and the indexkeys are replaced
 * by Var nodes identifying the index columns (their varno is INDEX_VAR and
 * their varattno is the index column number).
 *
 * indexorderbyorig is similarly the original form of any ORDER BY expressions
 * that are being implemented by the index, while indexorderby is modified to
 * have index column Vars on the left-hand side.  Here, multiple expressions
 * must appear in exactly the ORDER BY order, and this is not necessarily the
 * index column order.  Only the expressions are provided, not the auxiliary
 * sort-order information from the ORDER BY SortGroupClauses; it's assumed
 * that the sort ordering is fully determinable from the top-level operators.
 * indexorderbyorig is used at runtime to recheck the ordering, if the index
 * cannot calculate an accurate ordering.  It is also needed for EXPLAIN.
 *
 * indexorderbyops is a list of the OIDs of the operators used to sort the
 * ORDER BY expressions.  This is used together with indexorderbyorig to
 * recheck ordering at run time.  (Note that indexorderby, indexorderbyorig,
 * and indexorderbyops are used for amcanorderbyop cases, not amcanorder.)
 *
 * indexorderdir specifies the scan ordering, for indexscans on amcanorder
 * indexes (for other indexes it should be "don't care").
 * ----------------
 */
typedef struct IndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indexorderbyorig;	/* the same in original form */
	List	   *indexorderbyops;	/* OIDs of sort ops for ORDER BY exprs */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexScan;
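
/*
 * Example of the indexqual vs. indexqualorig distinction (an editorial
 * sketch, not from the original sources): given a query condition
 * "WHERE 42 < x" matched to an index on column x, indexqualorig keeps the
 * clause as written, with the indexkey on the right, while indexqual
 * commutes it to "x > 42" and replaces the reference to x with a Var whose
 * varno is INDEX_VAR and whose varattno is x's column number within the
 * index.
 */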

/* ----------------
 *		index-only scan node
 *
 * IndexOnlyScan is very similar to IndexScan, but it specifies an
 * index-only scan, in which the data comes from the index not the heap.
 * Because of this, *all* Vars in the plan node's targetlist, qual, and
 * index expressions reference index columns and have varno = INDEX_VAR.
 * Hence we do not need separate indexqualorig and indexorderbyorig lists,
 * since their contents would be equivalent to indexqual and indexorderby.
 *
 * To help EXPLAIN interpret the index Vars for display, we provide
 * indextlist, which represents the contents of the index as a targetlist
 * with one TLE per index column.  Vars appearing in this list reference
 * the base table, and this is the only field in the plan node that may
 * contain such Vars.
 * ----------------
 */
typedef struct IndexOnlyScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indextlist;		/* TargetEntry list describing index's cols */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexOnlyScan;

/* ----------------
 *		bitmap index scan node
 *
 * BitmapIndexScan delivers a bitmap of potential tuple locations;
 * it does not access the heap itself.  The bitmap is used by an
 * ancestor BitmapHeapScan node, possibly after passing through
 * intermediate BitmapAnd and/or BitmapOr nodes to combine it with
 * the results of other BitmapIndexScans.
 *
 * The fields have the same meanings as for IndexScan, except we don't
 * store a direction flag because direction is uninteresting.
 *
 * In a BitmapIndexScan plan node, the targetlist and qual fields are
 * not used and are always NIL.  The indexqualorig field is unused at
 * run time too, but is saved for the benefit of EXPLAIN.
 * ----------------
 */
typedef struct BitmapIndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	bool		isshared;		/* Create shared bitmap if set */
	List	   *indexqual;		/* list of index quals (OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
} BitmapIndexScan;

/* ----------------
 *		bitmap sequential scan node
 *
 * This needs a copy of the qual conditions being used by the input index
 * scans because there are various cases where we need to recheck the quals;
 * for example, when the bitmap is lossy about the specific rows on a page
 * that meet the index condition.
 * ----------------
 */
typedef struct BitmapHeapScan
{
	Scan		scan;
	List	   *bitmapqualorig; /* index quals, in standard expr form */
} BitmapHeapScan;

/* ----------------
 *		tid scan node
 *
 * tidquals is an implicitly OR'ed list of qual expressions of the form
 * "CTID = pseudoconstant", or "CTID = ANY(pseudoconstant_array)",
 * or a CurrentOfExpr for the relation.
 * ----------------
 */
typedef struct TidScan
{
	Scan		scan;
	List	   *tidquals;		/* qual(s) involving CTID = something */
} TidScan;

/* ----------------
 *		subquery scan node
 *
 * SubqueryScan is for scanning the output of a sub-query in the range table.
 * We often need an extra plan node above the sub-query's plan to perform
 * expression evaluations (which we can't push into the sub-query without
 * risking changing its semantics).  Although we are not scanning a physical
 * relation, we make this a descendant of Scan anyway for code-sharing
 * purposes.
 *
 * Note: we store the sub-plan in the type-specific subplan field, not in
 * the generic lefttree field as you might expect.  This is because we do
 * not want plan-tree-traversal routines to recurse into the subplan without
 * knowing that they are changing Query contexts.
 * ----------------
 */
typedef struct SubqueryScan
{
	Scan		scan;
	Plan	   *subplan;
} SubqueryScan;

/* ----------------
 *		FunctionScan node
 * ----------------
 */
typedef struct FunctionScan
{
	Scan		scan;
	List	   *functions;		/* list of RangeTblFunction nodes */
	bool		funcordinality; /* WITH ORDINALITY */
} FunctionScan;

/* ----------------
 *		ValuesScan node
 * ----------------
 */
typedef struct ValuesScan
{
	Scan		scan;
	List	   *values_lists;	/* list of expression lists */
} ValuesScan;

/* ----------------
 *		TableFunc scan node
 * ----------------
 */
typedef struct TableFuncScan
{
	Scan		scan;
	TableFunc  *tablefunc;		/* table function node */
} TableFuncScan;

/* ----------------
 *		CteScan node
 * ----------------
 */
typedef struct CteScan
{
	Scan		scan;
	int			ctePlanId;		/* ID of init SubPlan for CTE */
	int			cteParam;		/* ID of Param representing CTE output */
} CteScan;

/* ----------------
 *		NamedTuplestoreScan node
 * ----------------
 */
typedef struct NamedTuplestoreScan
{
	Scan		scan;
	char	   *enrname;		/* Name given to Ephemeral Named Relation */
} NamedTuplestoreScan;

/* ----------------
 *		WorkTableScan node
 * ----------------
 */
typedef struct WorkTableScan
{
	Scan		scan;
	int			wtParam;		/* ID of Param representing work table */
} WorkTableScan;

/* ----------------
 *		ForeignScan node
 *
 * fdw_exprs and fdw_private are both under the control of the foreign-data
 * wrapper, but fdw_exprs is presumed to contain expression trees and will
 * be post-processed accordingly by the planner; fdw_private won't be.
 * Note that everything in both lists must be copiable by copyObject().
 * One way to store an arbitrary blob of bytes is to represent it as a bytea
 * Const.  Usually, though, you'll be better off choosing a representation
 * that can be dumped usefully by nodeToString().
 *
 * fdw_scan_tlist is a targetlist describing the contents of the scan tuple
 * returned by the FDW; it can be NIL if the scan tuple matches the declared
 * rowtype of the foreign table, which is the normal case for a simple foreign
 * table scan.  (If the plan node represents a foreign join, fdw_scan_tlist
 * is required since there is no rowtype available from the system catalogs.)
 * When fdw_scan_tlist is provided, Vars in the node's tlist and quals must
 * have varno INDEX_VAR, and their varattnos correspond to resnos in the
 * fdw_scan_tlist (which are also column numbers in the actual scan tuple).
 * fdw_scan_tlist is never actually executed; it just holds expression trees
 * describing what is in the scan tuple's columns.
 *
 * fdw_recheck_quals should contain any quals which the core system passed to
 * the FDW but which were not added to scan.plan.qual; that is, it should
 * contain the quals being checked remotely.  This is needed for correct
 * behavior during EvalPlanQual rechecks.
 *
 * When the plan node represents a foreign join, scan.scanrelid is zero and
 * fs_relids must be consulted to identify the join relation.  (fs_relids
 * is valid for simple scans as well, but will always match scan.scanrelid.)
 * ----------------
 */
typedef struct ForeignScan
{
	Scan		scan;
	CmdType		operation;		/* SELECT/INSERT/UPDATE/DELETE */
	Oid			fs_server;		/* OID of foreign server */
	List	   *fdw_exprs;		/* expressions that FDW may evaluate */
	List	   *fdw_private;	/* private data for FDW */
	List	   *fdw_scan_tlist; /* optional tlist describing scan tuple */
	List	   *fdw_recheck_quals;	/* original quals not in scan.plan.qual */
	Bitmapset  *fs_relids;		/* RTIs generated by this scan */
	bool		fsSystemCol;	/* true if any "system column" is needed */
} ForeignScan;

/* ----------------
 *		CustomScan node
 *
 * The comments for ForeignScan's fdw_exprs, fdw_private, fdw_scan_tlist,
 * and fs_relids fields apply equally to CustomScan's custom_exprs,
 * custom_private, custom_scan_tlist, and custom_relids fields.  The
 * convention of setting scan.scanrelid to zero for joins applies as well.
 *
 * Note that since Plan trees can be copied, custom scan providers *must*
 * fit all plan data they need into those fields; embedding CustomScan in
 * a larger struct will not work.
 * ----------------
 */
struct CustomScanMethods;

typedef struct CustomScan
{
	Scan		scan;
	uint32		flags;			/* mask of CUSTOMPATH_* flags, see
								 * nodes/extensible.h */
	List	   *custom_plans;	/* list of Plan nodes, if any */
	List	   *custom_exprs;	/* expressions that custom code may evaluate */
	List	   *custom_private; /* private data for custom code */
	List	   *custom_scan_tlist;	/* optional tlist describing scan tuple */
	Bitmapset  *custom_relids;	/* RTIs generated by this scan */
	const struct CustomScanMethods *methods;
} CustomScan;

/*
 * ==========
 * Join nodes
 * ==========
 */

/* ----------------
 *		Join node
 *
 * jointype:	rule for joining tuples from left and right subtrees
 * inner_unique: each outer tuple can match to no more than one inner tuple
 * joinqual:	qual conditions that came from JOIN/ON or JOIN/USING
 *				(plan.qual contains conditions that came from WHERE)
 *
 * When jointype is INNER, joinqual and plan.qual are semantically
 * interchangeable.  For OUTER jointypes, the two are *not* interchangeable;
 * only joinqual is used to determine whether a match has been found for
 * the purpose of deciding whether to generate null-extended tuples.
 * (But plan.qual is still applied before actually returning a tuple.)
 * For an outer join, only joinquals are allowed to be used as the merge
 * or hash condition of a merge or hash join.
 *
 * inner_unique is set if the joinquals are such that no more than one inner
 * tuple could match any given outer tuple.  This allows the executor to
 * skip searching for additional matches.  (This must be provable from just
 * the joinquals, ignoring plan.qual, due to where the executor tests it.)
 * ----------------
 */
typedef struct Join
{
	Plan		plan;
	JoinType	jointype;
	bool		inner_unique;
	List	   *joinqual;		/* JOIN quals (in addition to plan.qual) */
} Join;
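
/*
 * Example (editorial note): in
 *		SELECT ... FROM a LEFT JOIN b ON a.x = b.y WHERE <cond>
 * the ON condition (a.x = b.y) becomes the join node's joinqual, while any
 * WHERE condition that has to be evaluated at the join level lands in
 * plan.qual.  For the LEFT JOIN, only joinqual decides whether an "a" row
 * found no match and must be null-extended; plan.qual merely filters the
 * join's output afterwards, which is why the two lists cannot be merged for
 * outer joins.
 */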

/* ----------------
 *		nest loop join node
 *
 * The nestParams list identifies any executor Params that must be passed
 * into execution of the inner subplan carrying values from the current row
 * of the outer subplan.  Currently we restrict these values to be simple
 * Vars, but perhaps someday that'd be worth relaxing.  (Note: during plan
 * creation, the paramval can actually be a PlaceHolderVar expression; but it
 * must be a Var with varno OUTER_VAR by the time it gets to the executor.)
 * ----------------
 */
typedef struct NestLoop
{
	Join		join;
	List	   *nestParams;		/* list of NestLoopParam nodes */
} NestLoop;

typedef struct NestLoopParam
{
	NodeTag		type;
	int			paramno;		/* number of the PARAM_EXEC Param to set */
	Var		   *paramval;		/* outer-relation Var to assign to Param */
} NestLoopParam;

/* ----------------
 *		merge join node
 *
 * The expected ordering of each mergeable column is described by a btree
 * opfamily OID, a collation OID, a direction (BTLessStrategyNumber or
 * BTGreaterStrategyNumber) and a nulls-first flag.  Note that the two sides
 * of each mergeclause may be of different datatypes, but they are ordered the
 * same way according to the common opfamily and collation.  The operator in
 * each mergeclause must be an equality operator of the indicated opfamily.
 * ----------------
 */
typedef struct MergeJoin
{
	Join		join;
	bool		skip_mark_restore;	/* Can we skip mark/restore calls? */
	List	   *mergeclauses;	/* mergeclauses as expression trees */
	/* these are arrays, but have the same length as the mergeclauses list: */
	Oid		   *mergeFamilies;	/* per-clause OIDs of btree opfamilies */
	Oid		   *mergeCollations;	/* per-clause OIDs of collations */
	int		   *mergeStrategies;	/* per-clause ordering (ASC or DESC) */
	bool	   *mergeNullsFirst;	/* per-clause nulls ordering */
} MergeJoin;

/* ----------------
 *		hash join node
 * ----------------
 */
typedef struct HashJoin
{
	Join		join;
	List	   *hashclauses;
	List	   *hashoperators;
	List	   *hashcollations;

	/*
	 * List of expressions to be hashed for tuples from the outer plan, to
	 * perform lookups in the hashtable over the inner plan.
	 */
	List	   *hashkeys;
} HashJoin;

/* ----------------
 *		materialization node
 * ----------------
 */
typedef struct Material
{
	Plan		plan;
} Material;

/* ----------------
 *		sort node
 * ----------------
 */
typedef struct Sort
{
	Plan		plan;
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
} Sort;

/* ---------------
 *		group node -
 *			Used for queries with GROUP BY (but no aggregates) specified.
 *			The input must be presorted according to the grouping columns.
 * ---------------
 */
typedef struct Group
{
	Plan		plan;
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	Oid		   *grpCollations;
} Group;

/* ---------------
 *		aggregate node
 *
 * An Agg node implements plain or grouped aggregation.  For grouped
 * aggregation, we can work with presorted input or unsorted input;
 * the latter strategy uses an internal hashtable.
 *
 * Notice the lack of any direct info about the aggregate functions to be
 * computed.  They are found by scanning the node's tlist and quals during
 * executor startup.  (It is possible that there are no aggregate functions;
 * this could happen if they get optimized away by constant-folding, or if
 * we are using the Agg node to implement hash-based grouping.)
 * ---------------
 */
typedef struct Agg
{
	Plan		plan;
	AggStrategy aggstrategy;	/* basic strategy, see nodes.h */
	AggSplit	aggsplit;		/* agg-splitting mode, see nodes.h */
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	Oid		   *grpCollations;
	long		numGroups;		/* estimated number of groups in input */
	Bitmapset  *aggParams;		/* IDs of Params used in Aggref inputs */
	/* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
	List	   *groupingSets;	/* grouping sets to use */
	List	   *chain;			/* chained Agg/Sort nodes */
} Agg;

/* ----------------
 *		window aggregate node
 * ----------------
 */
typedef struct WindowAgg
{
	Plan		plan;
	Index		winref;			/* ID referenced by window functions */
	int			partNumCols;	/* number of columns in partition clause */
	AttrNumber *partColIdx;		/* their indexes in the target list */
	Oid		   *partOperators;	/* equality operators for partition columns */
	Oid		   *partCollations; /* collations for partition columns */
	int			ordNumCols;		/* number of columns in ordering clause */
	AttrNumber *ordColIdx;		/* their indexes in the target list */
	Oid		   *ordOperators;	/* equality operators for ordering columns */
	Oid		   *ordCollations;	/* collations for ordering columns */
	int			frameOptions;	/* frame_clause options, see WindowDef */
	Node	   *startOffset;	/* expression for starting bound, if any */
	Node	   *endOffset;		/* expression for ending bound, if any */
	/* these fields are used with RANGE offset PRECEDING/FOLLOWING: */
	Oid			startInRangeFunc;	/* in_range function for startOffset */
	Oid			endInRangeFunc; /* in_range function for endOffset */
	Oid			inRangeColl;	/* collation for in_range tests */
	bool		inRangeAsc;		/* use ASC sort order for in_range tests? */
	bool		inRangeNullsFirst;	/* nulls sort first for in_range tests? */
} WindowAgg;

/* ----------------
 *		unique node
 * ----------------
 */
typedef struct Unique
{
	Plan		plan;
	int			numCols;		/* number of columns to check for uniqueness */
	AttrNumber *uniqColIdx;		/* their indexes in the target list */
	Oid		   *uniqOperators;	/* equality operators to compare with */
	Oid		   *uniqCollations; /* collations for equality comparisons */
} Unique;

/* ------------
 *		gather node
 *
 * Note: rescan_param is the ID of a PARAM_EXEC parameter slot.  That slot
 * will never actually contain a value, but the Gather node must flag it as
 * having changed whenever it is rescanned.  The child parallel-aware scan
 * nodes are marked as depending on that parameter, so that the rescan
 * machinery is aware that their output is likely to change across rescans.
 * In some cases we don't need a rescan Param, so rescan_param is set to -1.
 * ------------
 */
typedef struct Gather
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	bool		single_copy;	/* don't execute plan more than once */
	bool		invisible;		/* suppress EXPLAIN display (for testing)? */
	Bitmapset  *initParam;		/* param IDs of initplans that are referenced
								 * by this Gather node or one of its child
								 * nodes */
} Gather;

/* ------------
 *		gather merge node
 * ------------
 */
typedef struct GatherMerge
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	/* remaining fields are just like the sort-key info in struct Sort */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
	Bitmapset  *initParam;		/* param IDs of initplans that are referenced
								 * by this GatherMerge node or one of its
								 * child nodes */
} GatherMerge;

/* ----------------
 *		hash build node
 *
 * If the executor is supposed to try to apply skew join optimization, then
 * skewTable/skewColumn/skewInherit identify the outer relation's join key
 * column, from which the relevant MCV statistics can be fetched.
 * ----------------
 */
typedef struct Hash
{
	Plan		plan;

	/*
	 * List of expressions to be hashed for tuples from Hash's outer plan,
	 * needed to put them into the hashtable.
	 */
	List	   *hashkeys;		/* hash keys for the hashjoin condition */
	Oid			skewTable;		/* outer join key's table OID, or InvalidOid */
	AttrNumber	skewColumn;		/* outer join key's column #, or zero */
	bool		skewInherit;	/* is outer join rel an inheritance tree? */
	/* all other info is in the parent HashJoin node */
	double		rows_total;		/* estimate total rows if parallel_aware */
} Hash;

/* ----------------
 *		setop node
 * ----------------
 */
typedef struct SetOp
{
	Plan		plan;
	SetOpCmd	cmd;			/* what to do, see nodes.h */
	SetOpStrategy strategy;		/* how to do it, see nodes.h */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	Oid		   *dupCollations;
	AttrNumber	flagColIdx;		/* where is the flag column, if any */
	int			firstFlag;		/* flag value for first input relation */
	long		numGroups;		/* estimated number of groups in input */
} SetOp;

/* ----------------
 *		lock-rows node
 *
 * rowMarks identifies the rels to be locked by this node; it should be
 * a subset of the rowMarks listed in the top-level PlannedStmt.
 * epqParam is a Param that all scan nodes below this one must depend on.
 * It is used to force re-evaluation of the plan during EvalPlanQual.
 * ----------------
 */
typedef struct LockRows
{
	Plan		plan;
	List	   *rowMarks;		/* a list of PlanRowMark's */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
} LockRows;

/* ----------------
 *		limit node
 *
 * Note: as of Postgres 8.2, the offset and count expressions are expected
 * to yield int8, rather than int4 as before.
 * ----------------
 */
typedef struct Limit
{
	Plan		plan;
	Node	   *limitOffset;	/* OFFSET parameter, or NULL if none */
	Node	   *limitCount;		/* COUNT parameter, or NULL if none */
} Limit;


/*
 * RowMarkType -
 *	  enums for types of row-marking operations
 *
 * The first four of these values represent different lock strengths that
 * we can take on tuples according to SELECT FOR [KEY] UPDATE/SHARE requests.
 * We support these on regular tables, as well as on foreign tables whose FDWs
 * report support for late locking.  For other foreign tables, any locking
 * that might be done for such requests must happen during the initial row
 * fetch; their FDWs provide no mechanism for going back to lock a row later.
 * This means that the semantics will be a bit different than for a local
 * table; in particular we are likely to lock more rows than would be locked
 * locally, since remote rows will be locked even if they then fail
 * locally-checked restriction or join quals.  However, the prospect of
 * doing a separate remote query to lock each selected row is usually pretty
 * unappealing, so early locking remains a credible design choice for FDWs.
 *
 * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we have to uniquely
 * identify all the source rows, not only those from the target relations, so
 * that we can perform EvalPlanQual rechecking at need.  For plain tables we
 * can just fetch the TID, much as for a target relation; this case is
 * represented by ROW_MARK_REFERENCE.  Otherwise (for example for VALUES or
 * FUNCTION scans) we have to copy the whole row value.  ROW_MARK_COPY is
 * pretty inefficient, since most of the time we'll never need the data; but
 * fortunately the overhead is usually not performance-critical in practice.
 * By default we use ROW_MARK_COPY for foreign tables, but if the FDW has
 * a concept of rowid it can request to use ROW_MARK_REFERENCE instead.
 * (Again, this probably doesn't make sense if a physical remote fetch is
 * needed, but for FDWs that map to local storage it might be credible.)
 */
typedef enum RowMarkType
{
	ROW_MARK_EXCLUSIVE,			/* obtain exclusive tuple lock */
	ROW_MARK_NOKEYEXCLUSIVE,	/* obtain no-key exclusive tuple lock */
	ROW_MARK_SHARE,				/* obtain shared tuple lock */
	ROW_MARK_KEYSHARE,			/* obtain keyshare tuple lock */
	ROW_MARK_REFERENCE,			/* just fetch the TID, don't lock it */
	ROW_MARK_COPY				/* physically copy the row value */
} RowMarkType;

#define RowMarkRequiresRowShareLock(marktype)  ((marktype) <= ROW_MARK_KEYSHARE)
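/*
 * Note that this test relies on the declaration order of RowMarkType: the
 * four locking mark types precede the non-locking ones.
 */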

/*
 * PlanRowMark -
 *	   plan-time representation of FOR [KEY] UPDATE/SHARE clauses
 *
 * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
 * PlanRowMark node for each non-target relation in the query.  Relations that
 * are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
 * regular tables or supported foreign tables) or ROW_MARK_COPY (if not).
 *
 * Initially all PlanRowMarks have rti == prti and isParent == false.
 * When the planner discovers that a relation is the root of an inheritance
 * tree, it sets isParent true, and adds an additional PlanRowMark to the
 * list for each child relation (including the target rel itself in its role
 * as a child).  isParent is also set to true for the partitioned child
 * relations, which, like the root parent, are not scanned.  The child
 * entries have rti == child rel's RT index and prti == parent's RT index,
 * and can therefore be recognized as children by the fact that prti != rti.
 * The parent's allMarkTypes field gets the OR of (1<<markType) across all
 * its children (this definition allows children to use different markTypes).
 *
 * The planner also adds resjunk output columns to the plan that carry
 * information sufficient to identify the locked or fetched rows.  When
 * markType != ROW_MARK_COPY, these columns are named
 *		tableoid%u			OID of table
 *		ctid%u				TID of row
 * The tableoid column is only present for an inheritance hierarchy.
 * When markType == ROW_MARK_COPY, there is instead a single column named
 *		wholerow%u			whole-row value of relation
 * (An inheritance hierarchy could have all three resjunk output columns,
 * if some children use a different markType than others.)
 * In all three cases, %u represents the rowmark ID number (rowmarkId).
 * This number is unique within a plan tree, except that child relation
 * entries copy their parent's rowmarkId.  (Assigning unique numbers
 * means we needn't renumber rowmarkIds when flattening subqueries, which
 * would require finding and renaming the resjunk columns as well.)
 * Note this means that all tables in an inheritance hierarchy share the
 * same resjunk column names.  However, in an inherited UPDATE/DELETE the
 * columns could have different physical column numbers in each subplan.
 */
typedef struct PlanRowMark
{
	NodeTag		type;
	Index		rti;			/* range table index of markable relation */
	Index		prti;			/* range table index of parent relation */
	Index		rowmarkId;		/* unique identifier for resjunk columns */
	RowMarkType markType;		/* see enum above */
	int			allMarkTypes;	/* OR of (1<<markType) for all children */
	LockClauseStrength strength;	/* LockingClause's strength, or LCS_NONE */
	LockWaitPolicy waitPolicy;	/* NOWAIT and SKIP LOCKED options */
	bool		isParent;		/* true if this is a "dummy" parent entry */
} PlanRowMark;
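
/*
 * Example (editorial note): a PlanRowMark with rowmarkId = 1 and markType =
 * ROW_MARK_REFERENCE in an inheritance hierarchy yields resjunk columns
 * "tableoid1" and "ctid1"; with markType = ROW_MARK_COPY there would instead
 * be a single resjunk column "wholerow1".
 */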


/*
 * Node types to represent partition pruning information.
 */

/*
 * PartitionPruneInfo - Details required to allow the executor to prune
 * partitions.
 *
 * Here we store mapping details to allow translation of a partitioned table's
 * index as returned by the partition pruning code into subplan indexes for
 * plan types which support arbitrary numbers of subplans, such as Append.
 * We also store various details to tell the executor when it should be
 * performing partition pruning.
 *
 * Each PartitionedRelPruneInfo describes the partitioning rules for a single
 * partitioned table (a/k/a level of partitioning).  Since a partitioning
 * hierarchy could contain multiple levels, we represent it by a List of
 * PartitionedRelPruneInfos, where the first entry represents the topmost
 * partitioned table and additional entries represent non-leaf child
 * partitions, ordered such that parents appear before their children.
 * Then, since an Append-type node could have multiple partitioning
 * hierarchies among its children, we have an unordered List of those Lists.
 *
 * prune_infos			List of Lists containing PartitionedRelPruneInfo
 *						nodes, one sublist per run-time-prunable partition
 *						hierarchy appearing in the parent plan node's subplans.
 * other_subplans		Indexes of any subplans that are not accounted for
 *						by any of the PartitionedRelPruneInfo nodes in
 *						"prune_infos".  These subplans must not be pruned.
 */
typedef struct PartitionPruneInfo
{
	NodeTag		type;
	List	   *prune_infos;
	Bitmapset  *other_subplans;
} PartitionPruneInfo;

/*
 * PartitionedRelPruneInfo - Details required to allow the executor to prune
 * partitions for a single partitioned table.
 *
 * subplan_map[] and subpart_map[] are indexed by partition index of the
 * partitioned table referenced by 'rtindex', the partition index being the
 * order that the partitions are defined in the table's PartitionDesc.  For a
 * leaf partition p, subplan_map[p] contains the zero-based index of the
 * partition's subplan in the parent plan's subplan list; it is -1 if the
 * partition is non-leaf or has been pruned.  For a non-leaf partition p,
 * subpart_map[p] contains the zero-based index of that sub-partition's
 * PartitionedRelPruneInfo in the hierarchy's PartitionedRelPruneInfo list;
 * it is -1 if the partition is a leaf or has been pruned.  Note that subplan
 * indexes, as stored in 'subplan_map', are global across the parent plan
 * node, but partition indexes are valid only within a particular hierarchy.
 * relid_map[p] contains the partition's OID, or 0 if the partition was pruned.
 */
typedef struct PartitionedRelPruneInfo
{
	NodeTag		type;
	Index		rtindex;		/* RT index of partition rel for this level */
	Bitmapset  *present_parts;	/* Indexes of all partitions which subplans or
								 * subparts are present for */
	int			nparts;			/* Length of the following arrays: */
	int		   *subplan_map;	/* subplan index by partition index, or -1 */
	int		   *subpart_map;	/* subpart index by partition index, or -1 */
	Oid		   *relid_map;		/* relation OID by partition index, or 0 */

	/*
	 * initial_pruning_steps shows how to prune during executor startup
	 * (i.e., without use of any PARAM_EXEC Params); it is NIL if no startup
	 * pruning is required.  exec_pruning_steps shows how to prune with
	 * PARAM_EXEC Params; it is NIL if no per-scan pruning is required.
	 */
	List	   *initial_pruning_steps;	/* List of PartitionPruneStep */
	List	   *exec_pruning_steps; /* List of PartitionPruneStep */
	Bitmapset  *execparamids;	/* All PARAM_EXEC Param IDs in
								 * exec_pruning_steps */
} PartitionedRelPruneInfo;
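
/*
 * Worked example (editorial sketch, not from the original sources): consider
 * a partitioned table whose PartitionDesc lists three partitions.  Partition
 * 0 is a leaf planned as the parent node's subplan 4; partition 1 is itself
 * partitioned, with its PartitionedRelPruneInfo at index 1 of the
 * hierarchy's list; partition 2 was pruned at plan time.  Then:
 *
 *		subplan_map[] = {4, -1, -1}
 *		subpart_map[] = {-1, 1, -1}
 *		relid_map[]   = {<OID of partition 0>, <OID of partition 1>, 0}
 */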

/*
 * Abstract Node type for partition pruning steps (there are no concrete
 * Nodes of this type).
 *
 * step_id is the global identifier of the step within its pruning context.
 */
typedef struct PartitionPruneStep
{
	NodeTag		type;
	int			step_id;
} PartitionPruneStep;

/*
 * PartitionPruneStepOp - Information to prune using a set of mutually AND'd
 * OpExpr clauses
 *
 * This contains information extracted from up to partnatts OpExpr clauses,
 * where partnatts is the number of partition key columns.  'opstrategy' is
 * the strategy of the operator in the clause matched to the last partition
 * key.  'exprs' contains expressions which comprise the lookup key to be
 * passed to the partition bound search function.  'cmpfns' contains the OIDs
 * of comparison functions used to compare aforementioned expressions with
 * partition bounds.  Both 'exprs' and 'cmpfns' contain the same number of
 * items, up to partnatts items.
 *
 * Once we find the offset of a partition bound using the lookup key, we
 * determine which partitions to include in the result based on the value of
 * 'opstrategy'.  For example, if it were equality, we'd return just the
 * partition that would contain that key or a set of partitions if the key
 * didn't consist of all partitioning columns.  For non-equality strategies,
 * we'd need to include other partitions as appropriate.
 *
 * 'nullkeys' is the set containing the offset of the partition keys (0 to
 * partnatts - 1) that were matched to an IS NULL clause.  This is only
 * considered for hash partitioning as we need to pass which keys are null
 * to the hash partition bound search function.  It is never possible to
 * have an expression be present in 'exprs' for a given partition key and
 * the corresponding bit set in 'nullkeys'.
 */
typedef struct PartitionPruneStepOp
{
	PartitionPruneStep step;

	StrategyNumber opstrategy;
	List	   *exprs;
	List	   *cmpfns;
	Bitmapset  *nullkeys;
} PartitionPruneStepOp;

/*
 * PartitionPruneStepCombine - Information to prune using a BoolExpr clause
 *
 * For BoolExpr clauses, we combine the set of partitions determined for each
 * of the argument clauses.
 */
typedef enum PartitionPruneCombineOp
{
	PARTPRUNE_COMBINE_UNION,
	PARTPRUNE_COMBINE_INTERSECT
} PartitionPruneCombineOp;

typedef struct PartitionPruneStepCombine
{
	PartitionPruneStep step;

	PartitionPruneCombineOp combineOp;
	List	   *source_stepids;
} PartitionPruneStepCombine;


/*
 * Plan invalidation info
 *
 * We track the objects on which a PlannedStmt depends in two ways:
 * relations are recorded as a simple list of OIDs, and everything else
 * is represented as a list of PlanInvalItems.  A PlanInvalItem is designed
 * to be used with the syscache invalidation mechanism, so it identifies a
 * system catalog entry by cache ID and hash value.
 */
typedef struct PlanInvalItem
{
	NodeTag		type;
	int			cacheId;		/* a syscache ID, see utils/syscache.h */
	uint32		hashValue;		/* hash value of object's cache lookup key */
} PlanInvalItem;

#endif							/* PLANNODES_H */