1 | /*------------------------------------------------------------------------- |
2 | * |
3 | * spi.c |
4 | * Server Programming Interface |
5 | * |
6 | * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group |
7 | * Portions Copyright (c) 1994, Regents of the University of California |
8 | * |
9 | * |
10 | * IDENTIFICATION |
11 | * src/backend/executor/spi.c |
12 | * |
13 | *------------------------------------------------------------------------- |
14 | */ |
15 | #include "postgres.h" |
16 | |
17 | #include "access/htup_details.h" |
18 | #include "access/printtup.h" |
19 | #include "access/sysattr.h" |
20 | #include "access/xact.h" |
21 | #include "catalog/heap.h" |
22 | #include "catalog/pg_type.h" |
23 | #include "commands/trigger.h" |
24 | #include "executor/executor.h" |
25 | #include "executor/spi_priv.h" |
26 | #include "miscadmin.h" |
27 | #include "tcop/pquery.h" |
28 | #include "tcop/utility.h" |
29 | #include "utils/builtins.h" |
30 | #include "utils/datum.h" |
31 | #include "utils/lsyscache.h" |
32 | #include "utils/memutils.h" |
33 | #include "utils/rel.h" |
34 | #include "utils/snapmgr.h" |
35 | #include "utils/syscache.h" |
36 | #include "utils/typcache.h" |
37 | |
38 | |
39 | /* |
40 | * These global variables are part of the API for various SPI functions |
41 | * (a horrible API choice, but it's too late now). To reduce the risk of |
42 | * interference between different SPI callers, we save and restore them |
43 | * when entering/exiting a SPI nesting level. |
44 | */ |
45 | uint64 SPI_processed = 0; |
46 | SPITupleTable *SPI_tuptable = NULL; |
47 | int SPI_result = 0; |
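/*
 * Illustrative sketch (not used anywhere in this file): a caller normally
 * checks the return code of an SPI call and then consults these globals
 * right away, before making further SPI calls that would overwrite them.
 *
 *		int		ret = SPI_execute("SELECT 1", true, 0);
 *
 *		if (ret != SPI_OK_SELECT)
 *			elog(ERROR, "SPI_execute failed: %s", SPI_result_code_string(ret));
 *		elog(NOTICE, "processed " UINT64_FORMAT " rows", SPI_processed);
 */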
48 | |
49 | static _SPI_connection *_SPI_stack = NULL; |
50 | static _SPI_connection *_SPI_current = NULL; |
51 | static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */ |
52 | static int _SPI_connected = -1; /* current stack index */ |
53 | |
54 | static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, |
55 | ParamListInfo paramLI, bool read_only); |
56 | |
57 | static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan); |
58 | |
59 | static void _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan); |
60 | |
61 | static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, |
62 | Snapshot snapshot, Snapshot crosscheck_snapshot, |
63 | bool read_only, bool fire_triggers, uint64 tcount); |
64 | |
65 | static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes, |
66 | Datum *Values, const char *Nulls); |
67 | |
68 | static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount); |
69 | |
70 | static void _SPI_error_callback(void *arg); |
71 | |
72 | static void _SPI_cursor_operation(Portal portal, |
73 | FetchDirection direction, long count, |
74 | DestReceiver *dest); |
75 | |
76 | static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan); |
77 | static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan); |
78 | |
79 | static int _SPI_begin_call(bool use_exec); |
80 | static int _SPI_end_call(bool use_exec); |
81 | static MemoryContext _SPI_execmem(void); |
82 | static MemoryContext _SPI_procmem(void); |
83 | static bool _SPI_checktuples(void); |
84 | |
85 | |
86 | /* =================== interface functions =================== */ |
87 | |
88 | int |
89 | SPI_connect(void) |
90 | { |
91 | return SPI_connect_ext(0); |
92 | } |
93 | |
94 | int |
95 | SPI_connect_ext(int options) |
96 | { |
97 | int newdepth; |
98 | |
99 | /* Enlarge stack if necessary */ |
100 | if (_SPI_stack == NULL) |
101 | { |
102 | if (_SPI_connected != -1 || _SPI_stack_depth != 0) |
103 | elog(ERROR, "SPI stack corrupted"); |
104 | newdepth = 16; |
105 | _SPI_stack = (_SPI_connection *) |
106 | MemoryContextAlloc(TopMemoryContext, |
107 | newdepth * sizeof(_SPI_connection)); |
108 | _SPI_stack_depth = newdepth; |
109 | } |
110 | else |
111 | { |
112 | if (_SPI_stack_depth <= 0 || _SPI_stack_depth <= _SPI_connected) |
113 | elog(ERROR, "SPI stack corrupted"); |
114 | if (_SPI_stack_depth == _SPI_connected + 1) |
115 | { |
116 | newdepth = _SPI_stack_depth * 2; |
117 | _SPI_stack = (_SPI_connection *) |
118 | repalloc(_SPI_stack, |
119 | newdepth * sizeof(_SPI_connection)); |
120 | _SPI_stack_depth = newdepth; |
121 | } |
122 | } |
123 | |
124 | /* Enter new stack level */ |
125 | _SPI_connected++; |
126 | Assert(_SPI_connected >= 0 && _SPI_connected < _SPI_stack_depth); |
127 | |
128 | _SPI_current = &(_SPI_stack[_SPI_connected]); |
129 | _SPI_current->processed = 0; |
130 | _SPI_current->tuptable = NULL; |
131 | _SPI_current->execSubid = InvalidSubTransactionId; |
132 | slist_init(&_SPI_current->tuptables); |
133 | _SPI_current->procCxt = NULL; /* in case we fail to create 'em */ |
134 | _SPI_current->execCxt = NULL; |
135 | _SPI_current->connectSubid = GetCurrentSubTransactionId(); |
136 | _SPI_current->queryEnv = NULL; |
137 | _SPI_current->atomic = (options & SPI_OPT_NONATOMIC ? false : true); |
138 | _SPI_current->internal_xact = false; |
139 | _SPI_current->outer_processed = SPI_processed; |
140 | _SPI_current->outer_tuptable = SPI_tuptable; |
141 | _SPI_current->outer_result = SPI_result; |
142 | |
143 | /* |
144 | * Create memory contexts for this procedure |
145 | * |
146 | * In atomic contexts (the normal case), we use TopTransactionContext, |
147 | * otherwise PortalContext, so that it lives across transaction |
148 | * boundaries. |
149 | * |
150 | * XXX It could be better to use PortalContext as the parent context in |
151 | * all cases, but we may not be inside a portal (consider deferred-trigger |
152 | * execution). Perhaps CurTransactionContext could be an option? For now |
153 | * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI(). |
154 | */ |
155 | _SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext, |
156 | "SPI Proc" , |
157 | ALLOCSET_DEFAULT_SIZES); |
158 | _SPI_current->execCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : _SPI_current->procCxt, |
159 | "SPI Exec" , |
160 | ALLOCSET_DEFAULT_SIZES); |
161 | /* ... and switch to procedure's context */ |
162 | _SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt); |
163 | |
164 | /* |
165 | * Reset API global variables so that current caller cannot accidentally |
166 | * depend on state of an outer caller. |
167 | */ |
168 | SPI_processed = 0; |
169 | SPI_tuptable = NULL; |
170 | SPI_result = 0; |
171 | |
172 | return SPI_OK_CONNECT; |
173 | } |
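/*
 * Illustrative sketch of a call site: a PL call handler that supports
 * procedures typically passes SPI_OPT_NONATOMIC when it was invoked in a
 * nonatomic (procedure) context, and 0 otherwise.  The "nonatomic" flag
 * below is a hypothetical variable supplied by such a handler.
 *
 *		int		rc = SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0);
 *
 *		if (rc != SPI_OK_CONNECT)
 *			elog(ERROR, "SPI_connect_ext failed: %s",
 *				 SPI_result_code_string(rc));
 */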
174 | |
175 | int |
176 | SPI_finish(void) |
177 | { |
178 | int res; |
179 | |
180 | res = _SPI_begin_call(false); /* just check we're connected */ |
181 | if (res < 0) |
182 | return res; |
183 | |
184 | /* Restore memory context as it was before procedure call */ |
185 | MemoryContextSwitchTo(_SPI_current->savedcxt); |
186 | |
187 | /* Release memory used in procedure call (including tuptables) */ |
188 | MemoryContextDelete(_SPI_current->execCxt); |
189 | _SPI_current->execCxt = NULL; |
190 | MemoryContextDelete(_SPI_current->procCxt); |
191 | _SPI_current->procCxt = NULL; |
192 | |
193 | /* |
194 | * Restore outer API variables, especially SPI_tuptable which is probably |
195 | * pointing at a just-deleted tuptable |
196 | */ |
197 | SPI_processed = _SPI_current->outer_processed; |
198 | SPI_tuptable = _SPI_current->outer_tuptable; |
199 | SPI_result = _SPI_current->outer_result; |
200 | |
201 | /* Exit stack level */ |
202 | _SPI_connected--; |
203 | if (_SPI_connected < 0) |
204 | _SPI_current = NULL; |
205 | else |
206 | _SPI_current = &(_SPI_stack[_SPI_connected]); |
207 | |
208 | return SPI_OK_FINISH; |
209 | } |
210 | |
211 | void |
212 | SPI_start_transaction(void) |
213 | { |
214 | MemoryContext oldcontext = CurrentMemoryContext; |
215 | |
216 | StartTransactionCommand(); |
217 | MemoryContextSwitchTo(oldcontext); |
218 | } |
219 | |
220 | static void |
221 | _SPI_commit(bool chain) |
222 | { |
223 | MemoryContext oldcontext = CurrentMemoryContext; |
224 | |
225 | if (_SPI_current->atomic) |
226 | ereport(ERROR, |
227 | (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), |
228 | errmsg("invalid transaction termination" ))); |
229 | |
230 | /* |
231 | * This restriction is required by PLs implemented on top of SPI. They |
232 | * use subtransactions to establish exception blocks that are supposed to |
233 | * be rolled back together if there is an error. Terminating the |
234 | * top-level transaction in such a block violates that idea. A future PL |
235 | * implementation might have different ideas about this, in which case |
236 | * this restriction would have to be refined or the check possibly be |
237 | * moved out of SPI into the PLs. |
238 | */ |
239 | if (IsSubTransaction()) |
240 | ereport(ERROR, |
241 | (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), |
242 | errmsg("cannot commit while a subtransaction is active" ))); |
243 | |
244 | /* |
245 | * Hold any pinned portals that any PLs might be using. We have to do |
246 | * this before changing transaction state, since this will run |
247 | * user-defined code that might throw an error. |
248 | */ |
249 | HoldPinnedPortals(); |
250 | |
251 | /* Start the actual commit */ |
252 | _SPI_current->internal_xact = true; |
253 | |
254 | /* |
255 | * Before committing, pop all active snapshots to avoid error about |
256 | * "snapshot %p still active". |
257 | */ |
258 | while (ActiveSnapshotSet()) |
259 | PopActiveSnapshot(); |
260 | |
261 | if (chain) |
262 | SaveTransactionCharacteristics(); |
263 | |
264 | CommitTransactionCommand(); |
265 | |
266 | if (chain) |
267 | { |
268 | StartTransactionCommand(); |
269 | RestoreTransactionCharacteristics(); |
270 | } |
271 | |
272 | MemoryContextSwitchTo(oldcontext); |
273 | |
274 | _SPI_current->internal_xact = false; |
275 | } |
276 | |
277 | void |
278 | SPI_commit(void) |
279 | { |
280 | _SPI_commit(false); |
281 | } |
282 | |
283 | void |
284 | SPI_commit_and_chain(void) |
285 | { |
286 | _SPI_commit(true); |
287 | } |
288 | |
289 | static void |
290 | _SPI_rollback(bool chain) |
291 | { |
292 | MemoryContext oldcontext = CurrentMemoryContext; |
293 | |
294 | if (_SPI_current->atomic) |
295 | ereport(ERROR, |
296 | (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), |
297 | errmsg("invalid transaction termination" ))); |
298 | |
299 | /* see under SPI_commit() */ |
300 | if (IsSubTransaction()) |
301 | ereport(ERROR, |
302 | (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), |
303 | errmsg("cannot roll back while a subtransaction is active" ))); |
304 | |
305 | /* |
306 | * Hold any pinned portals that any PLs might be using. We have to do |
307 | * this before changing transaction state, since this will run |
308 | * user-defined code that might throw an error, and in any case couldn't |
309 | * be run in an already-aborted transaction. |
310 | */ |
311 | HoldPinnedPortals(); |
312 | |
313 | /* Start the actual rollback */ |
314 | _SPI_current->internal_xact = true; |
315 | |
316 | if (chain) |
317 | SaveTransactionCharacteristics(); |
318 | |
319 | AbortCurrentTransaction(); |
320 | |
321 | if (chain) |
322 | { |
323 | StartTransactionCommand(); |
324 | RestoreTransactionCharacteristics(); |
325 | } |
326 | |
327 | MemoryContextSwitchTo(oldcontext); |
328 | |
329 | _SPI_current->internal_xact = false; |
330 | } |
331 | |
332 | void |
333 | SPI_rollback(void) |
334 | { |
335 | _SPI_rollback(false); |
336 | } |
337 | |
338 | void |
339 | SPI_rollback_and_chain(void) |
340 | { |
341 | _SPI_rollback(true); |
342 | } |
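/*
 * Illustrative sketch: a nonatomic PL implements a COMMIT (or ROLLBACK)
 * statement roughly as below.  After a plain commit or rollback the PL is
 * expected to start the next transaction itself, while the _and_chain
 * variants do that internally, re-applying the old transaction
 * characteristics.
 *
 *		if (chain)
 *			SPI_commit_and_chain();
 *		else
 *		{
 *			SPI_commit();
 *			SPI_start_transaction();
 *		}
 */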
343 | |
344 | /* |
345 | * Clean up SPI state. Called on transaction end (of non-SPI-internal |
346 | * transactions) and when returning to the main loop on error. |
347 | */ |
348 | void |
349 | SPICleanup(void) |
350 | { |
351 | _SPI_current = NULL; |
352 | _SPI_connected = -1; |
353 | /* Reset API global variables, too */ |
354 | SPI_processed = 0; |
355 | SPI_tuptable = NULL; |
356 | SPI_result = 0; |
357 | } |
358 | |
359 | /* |
360 | * Clean up SPI state at transaction commit or abort. |
361 | */ |
362 | void |
363 | AtEOXact_SPI(bool isCommit) |
364 | { |
365 | /* Do nothing if the transaction end was initiated by SPI. */ |
366 | if (_SPI_current && _SPI_current->internal_xact) |
367 | return; |
368 | |
369 | if (isCommit && _SPI_connected != -1) |
370 | ereport(WARNING, |
371 | (errcode(ERRCODE_WARNING), |
372 | errmsg("transaction left non-empty SPI stack" ), |
373 | errhint("Check for missing \"SPI_finish\" calls." ))); |
374 | |
375 | SPICleanup(); |
376 | } |
377 | |
378 | /* |
379 | * Clean up SPI state at subtransaction commit or abort. |
380 | * |
381 | * During commit, there shouldn't be any unclosed entries remaining from |
382 | * the current subtransaction; we emit a warning if any are found. |
383 | */ |
384 | void |
385 | AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) |
386 | { |
387 | bool found = false; |
388 | |
389 | while (_SPI_connected >= 0) |
390 | { |
391 | _SPI_connection *connection = &(_SPI_stack[_SPI_connected]); |
392 | |
393 | if (connection->connectSubid != mySubid) |
394 | break; /* couldn't be any underneath it either */ |
395 | |
396 | if (connection->internal_xact) |
397 | break; |
398 | |
399 | found = true; |
400 | |
401 | /* |
402 | * Release procedure memory explicitly (see note in SPI_connect) |
403 | */ |
404 | if (connection->execCxt) |
405 | { |
406 | MemoryContextDelete(connection->execCxt); |
407 | connection->execCxt = NULL; |
408 | } |
409 | if (connection->procCxt) |
410 | { |
411 | MemoryContextDelete(connection->procCxt); |
412 | connection->procCxt = NULL; |
413 | } |
414 | |
415 | /* |
416 | * Restore outer global variables and pop the stack entry. Unlike |
417 | * SPI_finish(), we don't risk switching to memory contexts that might |
418 | * be already gone. |
419 | */ |
420 | SPI_processed = connection->outer_processed; |
421 | SPI_tuptable = connection->outer_tuptable; |
422 | SPI_result = connection->outer_result; |
423 | |
424 | _SPI_connected--; |
425 | if (_SPI_connected < 0) |
426 | _SPI_current = NULL; |
427 | else |
428 | _SPI_current = &(_SPI_stack[_SPI_connected]); |
429 | } |
430 | |
431 | if (found && isCommit) |
432 | ereport(WARNING, |
433 | (errcode(ERRCODE_WARNING), |
434 | errmsg("subtransaction left non-empty SPI stack" ), |
435 | errhint("Check for missing \"SPI_finish\" calls." ))); |
436 | |
437 | /* |
438 | * If we are aborting a subtransaction and there is an open SPI context |
439 | * surrounding the subxact, clean up to prevent memory leakage. |
440 | */ |
441 | if (_SPI_current && !isCommit) |
442 | { |
443 | slist_mutable_iter siter; |
444 | |
445 | /* |
446 | * Throw away executor state if current executor operation was started |
447 | * within current subxact (essentially, force a _SPI_end_call(true)). |
448 | */ |
449 | if (_SPI_current->execSubid >= mySubid) |
450 | { |
451 | _SPI_current->execSubid = InvalidSubTransactionId; |
452 | MemoryContextResetAndDeleteChildren(_SPI_current->execCxt); |
453 | } |
454 | |
455 | /* throw away any tuple tables created within current subxact */ |
456 | slist_foreach_modify(siter, &_SPI_current->tuptables) |
457 | { |
458 | SPITupleTable *tuptable; |
459 | |
460 | tuptable = slist_container(SPITupleTable, next, siter.cur); |
461 | if (tuptable->subid >= mySubid) |
462 | { |
463 | /* |
464 | * If we used SPI_freetuptable() here, its internal search of |
465 | * the tuptables list would make this operation O(N^2). |
466 | * Instead, just free the tuptable manually. This should |
467 | * match what SPI_freetuptable() does. |
468 | */ |
469 | slist_delete_current(&siter); |
470 | if (tuptable == _SPI_current->tuptable) |
471 | _SPI_current->tuptable = NULL; |
472 | if (tuptable == SPI_tuptable) |
473 | SPI_tuptable = NULL; |
474 | MemoryContextDelete(tuptable->tuptabcxt); |
475 | } |
476 | } |
477 | } |
478 | } |
479 | |
480 | /* |
481 | * Are we executing inside a procedure (that is, a nonatomic SPI context)? |
482 | */ |
483 | bool |
484 | SPI_inside_nonatomic_context(void) |
485 | { |
486 | if (_SPI_current == NULL) |
487 | return false; /* not in any SPI context at all */ |
488 | if (_SPI_current->atomic) |
489 | return false; /* it's atomic (ie function not procedure) */ |
490 | return true; |
491 | } |
492 | |
493 | |
494 | /* Parse, plan, and execute a query string */ |
495 | int |
496 | SPI_execute(const char *src, bool read_only, long tcount) |
497 | { |
498 | _SPI_plan plan; |
499 | int res; |
500 | |
501 | if (src == NULL || tcount < 0) |
502 | return SPI_ERROR_ARGUMENT; |
503 | |
504 | res = _SPI_begin_call(true); |
505 | if (res < 0) |
506 | return res; |
507 | |
508 | memset(&plan, 0, sizeof(_SPI_plan)); |
509 | plan.magic = _SPI_PLAN_MAGIC; |
510 | plan.cursor_options = CURSOR_OPT_PARALLEL_OK; |
511 | |
512 | _SPI_prepare_oneshot_plan(src, &plan); |
513 | |
514 | res = _SPI_execute_plan(&plan, NULL, |
515 | InvalidSnapshot, InvalidSnapshot, |
516 | read_only, true, tcount); |
517 | |
518 | _SPI_end_call(true); |
519 | return res; |
520 | } |
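/*
 * Illustrative sketch (table and column names hypothetical): run a
 * read-only query, capped at 10 result rows, and walk the tuple table.
 * SPI_getvalue returns a palloc'd text representation, or NULL for a null
 * column.
 *
 *		int		ret = SPI_execute("SELECT name FROM mytable", true, 10);
 *
 *		if (ret == SPI_OK_SELECT && SPI_tuptable != NULL)
 *		{
 *			TupleDesc	tupdesc = SPI_tuptable->tupdesc;
 *			uint64		i;
 *
 *			for (i = 0; i < SPI_processed; i++)
 *			{
 *				char	   *val = SPI_getvalue(SPI_tuptable->vals[i],
 *											   tupdesc, 1);
 *
 *				if (val != NULL)
 *					elog(NOTICE, "row " UINT64_FORMAT ": %s", i, val);
 *			}
 *		}
 */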
521 | |
522 | /* Obsolete version of SPI_execute */ |
523 | int |
524 | SPI_exec(const char *src, long tcount) |
525 | { |
526 | return SPI_execute(src, false, tcount); |
527 | } |
528 | |
529 | /* Execute a previously prepared plan */ |
530 | int |
531 | SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls, |
532 | bool read_only, long tcount) |
533 | { |
534 | int res; |
535 | |
536 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0) |
537 | return SPI_ERROR_ARGUMENT; |
538 | |
539 | if (plan->nargs > 0 && Values == NULL) |
540 | return SPI_ERROR_PARAM; |
541 | |
542 | res = _SPI_begin_call(true); |
543 | if (res < 0) |
544 | return res; |
545 | |
546 | res = _SPI_execute_plan(plan, |
547 | _SPI_convert_params(plan->nargs, plan->argtypes, |
548 | Values, Nulls), |
549 | InvalidSnapshot, InvalidSnapshot, |
550 | read_only, true, tcount); |
551 | |
552 | _SPI_end_call(true); |
553 | return res; |
554 | } |
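/*
 * Illustrative sketch (plan assumed to have been built by SPI_prepare with
 * one int4 argument): the Nulls array uses 'n' for a null parameter and
 * ' ' for a non-null one; passing NULL for Nulls means no parameter is
 * null.
 *
 *		Datum	values[1];
 *		char	nulls[1];
 *
 *		values[0] = Int32GetDatum(42);
 *		nulls[0] = ' ';
 *		ret = SPI_execute_plan(plan, values, nulls, false, 0);
 */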
555 | |
556 | /* Obsolete version of SPI_execute_plan */ |
557 | int |
558 | SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount) |
559 | { |
560 | return SPI_execute_plan(plan, Values, Nulls, false, tcount); |
561 | } |
562 | |
563 | /* Execute a previously prepared plan */ |
564 | int |
565 | SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params, |
566 | bool read_only, long tcount) |
567 | { |
568 | int res; |
569 | |
570 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0) |
571 | return SPI_ERROR_ARGUMENT; |
572 | |
573 | res = _SPI_begin_call(true); |
574 | if (res < 0) |
575 | return res; |
576 | |
577 | res = _SPI_execute_plan(plan, params, |
578 | InvalidSnapshot, InvalidSnapshot, |
579 | read_only, true, tcount); |
580 | |
581 | _SPI_end_call(true); |
582 | return res; |
583 | } |
584 | |
585 | /* |
586 | * SPI_execute_snapshot -- identical to SPI_execute_plan, except that we allow |
587 | * the caller to specify exactly which snapshots to use, which will be |
588 | * registered here. Also, the caller may specify that AFTER triggers should be |
589 | * queued as part of the outer query rather than being fired immediately at the |
590 | * end of the command. |
591 | * |
592 | * This is currently not documented in spi.sgml because it is only intended |
593 | * for use by RI triggers. |
594 | * |
595 | * Passing snapshot == InvalidSnapshot will select the normal behavior of |
596 | * fetching a new snapshot for each query. |
597 | */ |
598 | int |
599 | SPI_execute_snapshot(SPIPlanPtr plan, |
600 | Datum *Values, const char *Nulls, |
601 | Snapshot snapshot, Snapshot crosscheck_snapshot, |
602 | bool read_only, bool fire_triggers, long tcount) |
603 | { |
604 | int res; |
605 | |
606 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0) |
607 | return SPI_ERROR_ARGUMENT; |
608 | |
609 | if (plan->nargs > 0 && Values == NULL) |
610 | return SPI_ERROR_PARAM; |
611 | |
612 | res = _SPI_begin_call(true); |
613 | if (res < 0) |
614 | return res; |
615 | |
616 | res = _SPI_execute_plan(plan, |
617 | _SPI_convert_params(plan->nargs, plan->argtypes, |
618 | Values, Nulls), |
619 | snapshot, crosscheck_snapshot, |
620 | read_only, fire_triggers, tcount); |
621 | |
622 | _SPI_end_call(true); |
623 | return res; |
624 | } |
625 | |
626 | /* |
627 | * SPI_execute_with_args -- plan and execute a query with supplied arguments |
628 | * |
629 | * This is functionally equivalent to SPI_prepare followed by |
630 | * SPI_execute_plan. |
631 | */ |
632 | int |
633 | SPI_execute_with_args(const char *src, |
634 | int nargs, Oid *argtypes, |
635 | Datum *Values, const char *Nulls, |
636 | bool read_only, long tcount) |
637 | { |
638 | int res; |
639 | _SPI_plan plan; |
640 | ParamListInfo paramLI; |
641 | |
642 | if (src == NULL || nargs < 0 || tcount < 0) |
643 | return SPI_ERROR_ARGUMENT; |
644 | |
645 | if (nargs > 0 && (argtypes == NULL || Values == NULL)) |
646 | return SPI_ERROR_PARAM; |
647 | |
648 | res = _SPI_begin_call(true); |
649 | if (res < 0) |
650 | return res; |
651 | |
652 | memset(&plan, 0, sizeof(_SPI_plan)); |
653 | plan.magic = _SPI_PLAN_MAGIC; |
654 | plan.cursor_options = CURSOR_OPT_PARALLEL_OK; |
655 | plan.nargs = nargs; |
656 | plan.argtypes = argtypes; |
657 | plan.parserSetup = NULL; |
658 | plan.parserSetupArg = NULL; |
659 | |
660 | paramLI = _SPI_convert_params(nargs, argtypes, |
661 | Values, Nulls); |
662 | |
663 | _SPI_prepare_oneshot_plan(src, &plan); |
664 | |
665 | res = _SPI_execute_plan(&plan, paramLI, |
666 | InvalidSnapshot, InvalidSnapshot, |
667 | read_only, true, tcount); |
668 | |
669 | _SPI_end_call(true); |
670 | return res; |
671 | } |
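/*
 * Illustrative sketch (table name hypothetical): one-shot parameterized
 * execution, equivalent to SPI_prepare followed by SPI_execute_plan but
 * without keeping the plan around.  Passing NULL for Nulls means no
 * parameter is null.
 *
 *		Oid		argtypes[1] = {INT4OID};
 *		Datum	values[1];
 *
 *		values[0] = Int32GetDatum(7);
 *		ret = SPI_execute_with_args("DELETE FROM mytable WHERE id = $1",
 *									1, argtypes, values, NULL,
 *									false, 0);
 */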
672 | |
673 | SPIPlanPtr |
674 | SPI_prepare(const char *src, int nargs, Oid *argtypes) |
675 | { |
676 | return SPI_prepare_cursor(src, nargs, argtypes, 0); |
677 | } |
678 | |
679 | SPIPlanPtr |
680 | SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes, |
681 | int cursorOptions) |
682 | { |
683 | _SPI_plan plan; |
684 | SPIPlanPtr result; |
685 | |
686 | if (src == NULL || nargs < 0 || (nargs > 0 && argtypes == NULL)) |
687 | { |
688 | SPI_result = SPI_ERROR_ARGUMENT; |
689 | return NULL; |
690 | } |
691 | |
692 | SPI_result = _SPI_begin_call(true); |
693 | if (SPI_result < 0) |
694 | return NULL; |
695 | |
696 | memset(&plan, 0, sizeof(_SPI_plan)); |
697 | plan.magic = _SPI_PLAN_MAGIC; |
698 | plan.cursor_options = cursorOptions; |
699 | plan.nargs = nargs; |
700 | plan.argtypes = argtypes; |
701 | plan.parserSetup = NULL; |
702 | plan.parserSetupArg = NULL; |
703 | |
704 | _SPI_prepare_plan(src, &plan); |
705 | |
706 | /* copy plan to procedure context */ |
707 | result = _SPI_make_plan_non_temp(&plan); |
708 | |
709 | _SPI_end_call(true); |
710 | |
711 | return result; |
712 | } |
713 | |
714 | SPIPlanPtr |
715 | SPI_prepare_params(const char *src, |
716 | ParserSetupHook parserSetup, |
717 | void *parserSetupArg, |
718 | int cursorOptions) |
719 | { |
720 | _SPI_plan plan; |
721 | SPIPlanPtr result; |
722 | |
723 | if (src == NULL) |
724 | { |
725 | SPI_result = SPI_ERROR_ARGUMENT; |
726 | return NULL; |
727 | } |
728 | |
729 | SPI_result = _SPI_begin_call(true); |
730 | if (SPI_result < 0) |
731 | return NULL; |
732 | |
733 | memset(&plan, 0, sizeof(_SPI_plan)); |
734 | plan.magic = _SPI_PLAN_MAGIC; |
735 | plan.cursor_options = cursorOptions; |
736 | plan.nargs = 0; |
737 | plan.argtypes = NULL; |
738 | plan.parserSetup = parserSetup; |
739 | plan.parserSetupArg = parserSetupArg; |
740 | |
741 | _SPI_prepare_plan(src, &plan); |
742 | |
743 | /* copy plan to procedure context */ |
744 | result = _SPI_make_plan_non_temp(&plan); |
745 | |
746 | _SPI_end_call(true); |
747 | |
748 | return result; |
749 | } |
750 | |
751 | int |
752 | SPI_keepplan(SPIPlanPtr plan) |
753 | { |
754 | ListCell *lc; |
755 | |
756 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || |
757 | plan->saved || plan->oneshot) |
758 | return SPI_ERROR_ARGUMENT; |
759 | |
760 | /* |
761 | * Mark it saved, reparent it under CacheMemoryContext, and mark all the |
762 | * component CachedPlanSources as saved. This sequence cannot fail |
763 | * partway through, so there's no risk of long-term memory leakage. |
764 | */ |
765 | plan->saved = true; |
766 | MemoryContextSetParent(plan->plancxt, CacheMemoryContext); |
767 | |
768 | foreach(lc, plan->plancache_list) |
769 | { |
770 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc); |
771 | |
772 | SaveCachedPlan(plansource); |
773 | } |
774 | |
775 | return 0; |
776 | } |
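/*
 * Illustrative sketch of the usual caller pattern: prepare once, call
 * SPI_keepplan so the plan survives SPI_finish, and cache the pointer for
 * later calls.  Where the cache lives (a static variable, fn_extra, a hash
 * table) is up to the caller; the static below is just for illustration.
 *
 *		static SPIPlanPtr saved_plan = NULL;
 *
 *		if (saved_plan == NULL)
 *		{
 *			Oid			argtypes[1] = {INT4OID};
 *			SPIPlanPtr	plan = SPI_prepare("SELECT $1 + 1", 1, argtypes);
 *
 *			if (plan == NULL)
 *				elog(ERROR, "SPI_prepare failed: %s",
 *					 SPI_result_code_string(SPI_result));
 *			if (SPI_keepplan(plan) != 0)
 *				elog(ERROR, "SPI_keepplan failed");
 *			saved_plan = plan;
 *		}
 */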
777 | |
778 | SPIPlanPtr |
779 | SPI_saveplan(SPIPlanPtr plan) |
780 | { |
781 | SPIPlanPtr newplan; |
782 | |
783 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC) |
784 | { |
785 | SPI_result = SPI_ERROR_ARGUMENT; |
786 | return NULL; |
787 | } |
788 | |
789 | SPI_result = _SPI_begin_call(false); /* don't change context */ |
790 | if (SPI_result < 0) |
791 | return NULL; |
792 | |
793 | newplan = _SPI_save_plan(plan); |
794 | |
795 | SPI_result = _SPI_end_call(false); |
796 | |
797 | return newplan; |
798 | } |
799 | |
800 | int |
801 | SPI_freeplan(SPIPlanPtr plan) |
802 | { |
803 | ListCell *lc; |
804 | |
805 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC) |
806 | return SPI_ERROR_ARGUMENT; |
807 | |
808 | /* Release the plancache entries */ |
809 | foreach(lc, plan->plancache_list) |
810 | { |
811 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc); |
812 | |
813 | DropCachedPlan(plansource); |
814 | } |
815 | |
816 | /* Now get rid of the _SPI_plan and subsidiary data in its plancxt */ |
817 | MemoryContextDelete(plan->plancxt); |
818 | |
819 | return 0; |
820 | } |
821 | |
822 | HeapTuple |
823 | SPI_copytuple(HeapTuple tuple) |
824 | { |
825 | MemoryContext oldcxt; |
826 | HeapTuple ctuple; |
827 | |
828 | if (tuple == NULL) |
829 | { |
830 | SPI_result = SPI_ERROR_ARGUMENT; |
831 | return NULL; |
832 | } |
833 | |
834 | if (_SPI_current == NULL) |
835 | { |
836 | SPI_result = SPI_ERROR_UNCONNECTED; |
837 | return NULL; |
838 | } |
839 | |
840 | oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt); |
841 | |
842 | ctuple = heap_copytuple(tuple); |
843 | |
844 | MemoryContextSwitchTo(oldcxt); |
845 | |
846 | return ctuple; |
847 | } |
848 | |
849 | HeapTupleHeader |
850 | SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc) |
851 | { |
852 | MemoryContext oldcxt; |
853 | HeapTupleHeader dtup; |
854 | |
855 | if (tuple == NULL || tupdesc == NULL) |
856 | { |
857 | SPI_result = SPI_ERROR_ARGUMENT; |
858 | return NULL; |
859 | } |
860 | |
861 | if (_SPI_current == NULL) |
862 | { |
863 | SPI_result = SPI_ERROR_UNCONNECTED; |
864 | return NULL; |
865 | } |
866 | |
867 | /* For RECORD results, make sure a typmod has been assigned */ |
868 | if (tupdesc->tdtypeid == RECORDOID && |
869 | tupdesc->tdtypmod < 0) |
870 | assign_record_type_typmod(tupdesc); |
871 | |
872 | oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt); |
873 | |
874 | dtup = DatumGetHeapTupleHeader(heap_copy_tuple_as_datum(tuple, tupdesc)); |
875 | |
876 | MemoryContextSwitchTo(oldcxt); |
877 | |
878 | return dtup; |
879 | } |
880 | |
881 | HeapTuple |
882 | SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum, |
883 | Datum *Values, const char *Nulls) |
884 | { |
885 | MemoryContext oldcxt; |
886 | HeapTuple mtuple; |
887 | int numberOfAttributes; |
888 | Datum *v; |
889 | bool *n; |
890 | int i; |
891 | |
892 | if (rel == NULL || tuple == NULL || natts < 0 || attnum == NULL || Values == NULL) |
893 | { |
894 | SPI_result = SPI_ERROR_ARGUMENT; |
895 | return NULL; |
896 | } |
897 | |
898 | if (_SPI_current == NULL) |
899 | { |
900 | SPI_result = SPI_ERROR_UNCONNECTED; |
901 | return NULL; |
902 | } |
903 | |
904 | oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt); |
905 | |
906 | SPI_result = 0; |
907 | |
908 | numberOfAttributes = rel->rd_att->natts; |
909 | v = (Datum *) palloc(numberOfAttributes * sizeof(Datum)); |
910 | n = (bool *) palloc(numberOfAttributes * sizeof(bool)); |
911 | |
912 | /* fetch old values and nulls */ |
913 | heap_deform_tuple(tuple, rel->rd_att, v, n); |
914 | |
915 | /* replace values and nulls */ |
916 | for (i = 0; i < natts; i++) |
917 | { |
918 | if (attnum[i] <= 0 || attnum[i] > numberOfAttributes) |
919 | break; |
920 | v[attnum[i] - 1] = Values[i]; |
921 | n[attnum[i] - 1] = (Nulls && Nulls[i] == 'n') ? true : false; |
922 | } |
923 | |
924 | if (i == natts) /* no errors in *attnum */ |
925 | { |
926 | mtuple = heap_form_tuple(rel->rd_att, v, n); |
927 | |
928 | /* |
929 | * copy the identification info of the old tuple: t_ctid, t_self, and |
930 | * OID (if any) |
931 | */ |
932 | mtuple->t_data->t_ctid = tuple->t_data->t_ctid; |
933 | mtuple->t_self = tuple->t_self; |
934 | mtuple->t_tableOid = tuple->t_tableOid; |
935 | } |
936 | else |
937 | { |
938 | mtuple = NULL; |
939 | SPI_result = SPI_ERROR_NOATTRIBUTE; |
940 | } |
941 | |
942 | pfree(v); |
943 | pfree(n); |
944 | |
945 | MemoryContextSwitchTo(oldcxt); |
946 | |
947 | return mtuple; |
948 | } |
949 | |
950 | int |
951 | SPI_fnumber(TupleDesc tupdesc, const char *fname) |
952 | { |
953 | int res; |
954 | const FormData_pg_attribute *sysatt; |
955 | |
956 | for (res = 0; res < tupdesc->natts; res++) |
957 | { |
958 | Form_pg_attribute attr = TupleDescAttr(tupdesc, res); |
959 | |
960 | if (namestrcmp(&attr->attname, fname) == 0 && |
961 | !attr->attisdropped) |
962 | return res + 1; |
963 | } |
964 | |
965 | sysatt = SystemAttributeByName(fname); |
966 | if (sysatt != NULL) |
967 | return sysatt->attnum; |
968 | |
969 | /* SPI_ERROR_NOATTRIBUTE is different from all sys column numbers */ |
970 | return SPI_ERROR_NOATTRIBUTE; |
971 | } |
972 | |
973 | char * |
974 | SPI_fname(TupleDesc tupdesc, int fnumber) |
975 | { |
976 | const FormData_pg_attribute *att; |
977 | |
978 | SPI_result = 0; |
979 | |
980 | if (fnumber > tupdesc->natts || fnumber == 0 || |
981 | fnumber <= FirstLowInvalidHeapAttributeNumber) |
982 | { |
983 | SPI_result = SPI_ERROR_NOATTRIBUTE; |
984 | return NULL; |
985 | } |
986 | |
987 | if (fnumber > 0) |
988 | att = TupleDescAttr(tupdesc, fnumber - 1); |
989 | else |
990 | att = SystemAttributeDefinition(fnumber); |
991 | |
992 | return pstrdup(NameStr(att->attname)); |
993 | } |
994 | |
995 | char * |
996 | SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber) |
997 | { |
998 | Datum val; |
999 | bool isnull; |
1000 | Oid typoid, |
1001 | foutoid; |
1002 | bool typisvarlena; |
1003 | |
1004 | SPI_result = 0; |
1005 | |
1006 | if (fnumber > tupdesc->natts || fnumber == 0 || |
1007 | fnumber <= FirstLowInvalidHeapAttributeNumber) |
1008 | { |
1009 | SPI_result = SPI_ERROR_NOATTRIBUTE; |
1010 | return NULL; |
1011 | } |
1012 | |
1013 | val = heap_getattr(tuple, fnumber, tupdesc, &isnull); |
1014 | if (isnull) |
1015 | return NULL; |
1016 | |
1017 | if (fnumber > 0) |
1018 | typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid; |
1019 | else |
1020 | typoid = (SystemAttributeDefinition(fnumber))->atttypid; |
1021 | |
1022 | getTypeOutputInfo(typoid, &foutoid, &typisvarlena); |
1023 | |
1024 | return OidOutputFunctionCall(foutoid, val); |
1025 | } |
1026 | |
1027 | Datum |
1028 | SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull) |
1029 | { |
1030 | SPI_result = 0; |
1031 | |
1032 | if (fnumber > tupdesc->natts || fnumber == 0 || |
1033 | fnumber <= FirstLowInvalidHeapAttributeNumber) |
1034 | { |
1035 | SPI_result = SPI_ERROR_NOATTRIBUTE; |
1036 | *isnull = true; |
1037 | return (Datum) NULL; |
1038 | } |
1039 | |
1040 | return heap_getattr(tuple, fnumber, tupdesc, isnull); |
1041 | } |
1042 | |
1043 | char * |
1044 | SPI_gettype(TupleDesc tupdesc, int fnumber) |
1045 | { |
1046 | Oid typoid; |
1047 | HeapTuple typeTuple; |
1048 | char *result; |
1049 | |
1050 | SPI_result = 0; |
1051 | |
1052 | if (fnumber > tupdesc->natts || fnumber == 0 || |
1053 | fnumber <= FirstLowInvalidHeapAttributeNumber) |
1054 | { |
1055 | SPI_result = SPI_ERROR_NOATTRIBUTE; |
1056 | return NULL; |
1057 | } |
1058 | |
1059 | if (fnumber > 0) |
1060 | typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid; |
1061 | else |
1062 | typoid = (SystemAttributeDefinition(fnumber))->atttypid; |
1063 | |
1064 | typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid)); |
1065 | |
1066 | if (!HeapTupleIsValid(typeTuple)) |
1067 | { |
1068 | SPI_result = SPI_ERROR_TYPUNKNOWN; |
1069 | return NULL; |
1070 | } |
1071 | |
1072 | result = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); |
1073 | ReleaseSysCache(typeTuple); |
1074 | return result; |
1075 | } |
1076 | |
1077 | /* |
1078 | * Get the data type OID for a column. |
1079 | * |
1080 | * There's nothing similar for typmod and typcollation. The rare consumers |
1081 | * thereof should inspect the TupleDesc directly. |
1082 | */ |
1083 | Oid |
1084 | SPI_gettypeid(TupleDesc tupdesc, int fnumber) |
1085 | { |
1086 | SPI_result = 0; |
1087 | |
1088 | if (fnumber > tupdesc->natts || fnumber == 0 || |
1089 | fnumber <= FirstLowInvalidHeapAttributeNumber) |
1090 | { |
1091 | SPI_result = SPI_ERROR_NOATTRIBUTE; |
1092 | return InvalidOid; |
1093 | } |
1094 | |
1095 | if (fnumber > 0) |
1096 | return TupleDescAttr(tupdesc, fnumber - 1)->atttypid; |
1097 | else |
1098 | return (SystemAttributeDefinition(fnumber))->atttypid; |
1099 | } |
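/*
 * Illustrative sketch (column name hypothetical): the usual way to pull a
 * typed value out of a result row is SPI_fnumber to locate the column,
 * SPI_getbinval to fetch the Datum, and SPI_gettypeid if the caller needs
 * to verify the type before converting.
 *
 *		int		colno = SPI_fnumber(tupdesc, "id");
 *		bool	isnull;
 *		Datum	d;
 *
 *		if (colno == SPI_ERROR_NOATTRIBUTE)
 *			elog(ERROR, "column \"id\" not found");
 *		d = SPI_getbinval(tuple, tupdesc, colno, &isnull);
 *		if (!isnull && SPI_gettypeid(tupdesc, colno) == INT4OID)
 *			elog(NOTICE, "id = %d", DatumGetInt32(d));
 */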
1100 | |
1101 | char * |
1102 | SPI_getrelname(Relation rel) |
1103 | { |
1104 | return pstrdup(RelationGetRelationName(rel)); |
1105 | } |
1106 | |
1107 | char * |
1108 | SPI_getnspname(Relation rel) |
1109 | { |
1110 | return get_namespace_name(RelationGetNamespace(rel)); |
1111 | } |
1112 | |
1113 | void * |
1114 | SPI_palloc(Size size) |
1115 | { |
1116 | if (_SPI_current == NULL) |
1117 | elog(ERROR, "SPI_palloc called while not connected to SPI" ); |
1118 | |
1119 | return MemoryContextAlloc(_SPI_current->savedcxt, size); |
1120 | } |
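/*
 * Illustrative sketch: anything a caller wants to hand back to its own
 * caller must be allocated in the upper executor context, because the SPI
 * procedure context goes away at SPI_finish.  SPI_palloc (like
 * SPI_copytuple and SPI_datumTransfer) allocates there.
 *
 *		char	   *tmp = SPI_getvalue(tuple, tupdesc, 1);
 *		char	   *result = NULL;
 *
 *		if (tmp != NULL)
 *		{
 *			result = SPI_palloc(strlen(tmp) + 1);
 *			strcpy(result, tmp);
 *		}
 *		SPI_finish();
 *		... result is still valid here ...
 */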
1121 | |
1122 | void * |
1123 | SPI_repalloc(void *pointer, Size size) |
1124 | { |
1125 | /* No longer need to worry which context chunk was in... */ |
1126 | return repalloc(pointer, size); |
1127 | } |
1128 | |
1129 | void |
1130 | SPI_pfree(void *pointer) |
1131 | { |
1132 | /* No longer need to worry which context chunk was in... */ |
1133 | pfree(pointer); |
1134 | } |
1135 | |
1136 | Datum |
1137 | SPI_datumTransfer(Datum value, bool typByVal, int typLen) |
1138 | { |
1139 | MemoryContext oldcxt; |
1140 | Datum result; |
1141 | |
1142 | if (_SPI_current == NULL) |
1143 | elog(ERROR, "SPI_datumTransfer called while not connected to SPI" ); |
1144 | |
1145 | oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt); |
1146 | |
1147 | result = datumTransfer(value, typByVal, typLen); |
1148 | |
1149 | MemoryContextSwitchTo(oldcxt); |
1150 | |
1151 | return result; |
1152 | } |
1153 | |
1154 | void |
1155 | SPI_freetuple(HeapTuple tuple) |
1156 | { |
1157 | /* No longer need to worry which context tuple was in... */ |
1158 | heap_freetuple(tuple); |
1159 | } |
1160 | |
1161 | void |
1162 | SPI_freetuptable(SPITupleTable *tuptable) |
1163 | { |
1164 | bool found = false; |
1165 | |
1166 | /* ignore call if NULL pointer */ |
1167 | if (tuptable == NULL) |
1168 | return; |
1169 | |
1170 | /* |
1171 | * Search only the topmost SPI context for a matching tuple table. |
1172 | */ |
1173 | if (_SPI_current != NULL) |
1174 | { |
1175 | slist_mutable_iter siter; |
1176 | |
1177 | /* find tuptable in active list, then remove it */ |
1178 | slist_foreach_modify(siter, &_SPI_current->tuptables) |
1179 | { |
1180 | SPITupleTable *tt; |
1181 | |
1182 | tt = slist_container(SPITupleTable, next, siter.cur); |
1183 | if (tt == tuptable) |
1184 | { |
1185 | slist_delete_current(&siter); |
1186 | found = true; |
1187 | break; |
1188 | } |
1189 | } |
1190 | } |
1191 | |
1192 | /* |
1193 | * Refuse the deletion if we didn't find it in the topmost SPI context. |
1194 | * This is primarily a guard against double deletion, but might prevent |
1195 | * other errors as well. Since the worst consequence of not deleting a |
1196 | * tuptable would be a transient memory leak, this is just a WARNING. |
1197 | */ |
1198 | if (!found) |
1199 | { |
1200 | elog(WARNING, "attempt to delete invalid SPITupleTable %p", tuptable); |
1201 | return; |
1202 | } |
1203 | |
1204 | /* for safety, reset global variables that might point at tuptable */ |
1205 | if (tuptable == _SPI_current->tuptable) |
1206 | _SPI_current->tuptable = NULL; |
1207 | if (tuptable == SPI_tuptable) |
1208 | SPI_tuptable = NULL; |
1209 | |
1210 | /* release all memory belonging to tuptable */ |
1211 | MemoryContextDelete(tuptable->tuptabcxt); |
1212 | } |
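/*
 * Illustrative sketch (query texts hypothetical): a caller that executes
 * many queries before calling SPI_finish can bound its memory use by
 * freeing each result explicitly.
 *
 *		for (i = 0; i < nqueries; i++)
 *		{
 *			if (SPI_execute(querytext[i], true, 0) != SPI_OK_SELECT)
 *				elog(ERROR, "query %d failed", i);
 *			... inspect SPI_tuptable ...
 *			SPI_freetuptable(SPI_tuptable);
 *		}
 */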
1213 | |
1214 | |
1215 | /* |
1216 | * SPI_cursor_open() |
1217 | * |
1218 | * Open a prepared SPI plan as a portal |
1219 | */ |
1220 | Portal |
1221 | SPI_cursor_open(const char *name, SPIPlanPtr plan, |
1222 | Datum *Values, const char *Nulls, |
1223 | bool read_only) |
1224 | { |
1225 | Portal portal; |
1226 | ParamListInfo paramLI; |
1227 | |
1228 | /* build transient ParamListInfo in caller's context */ |
1229 | paramLI = _SPI_convert_params(plan->nargs, plan->argtypes, |
1230 | Values, Nulls); |
1231 | |
1232 | portal = SPI_cursor_open_internal(name, plan, paramLI, read_only); |
1233 | |
1234 | /* done with the transient ParamListInfo */ |
1235 | if (paramLI) |
1236 | pfree(paramLI); |
1237 | |
1238 | return portal; |
1239 | } |
1240 | |
1241 | |
1242 | /* |
1243 | * SPI_cursor_open_with_args() |
1244 | * |
1245 | * Parse and plan a query and open it as a portal. |
1246 | */ |
1247 | Portal |
1248 | SPI_cursor_open_with_args(const char *name, |
1249 | const char *src, |
1250 | int nargs, Oid *argtypes, |
1251 | Datum *Values, const char *Nulls, |
1252 | bool read_only, int cursorOptions) |
1253 | { |
1254 | Portal result; |
1255 | _SPI_plan plan; |
1256 | ParamListInfo paramLI; |
1257 | |
1258 | if (src == NULL || nargs < 0) |
1259 | elog(ERROR, "SPI_cursor_open_with_args called with invalid arguments" ); |
1260 | |
1261 | if (nargs > 0 && (argtypes == NULL || Values == NULL)) |
1262 | elog(ERROR, "SPI_cursor_open_with_args called with missing parameters" ); |
1263 | |
1264 | SPI_result = _SPI_begin_call(true); |
1265 | if (SPI_result < 0) |
1266 | elog(ERROR, "SPI_cursor_open_with_args called while not connected" ); |
1267 | |
1268 | memset(&plan, 0, sizeof(_SPI_plan)); |
1269 | plan.magic = _SPI_PLAN_MAGIC; |
1270 | plan.cursor_options = cursorOptions; |
1271 | plan.nargs = nargs; |
1272 | plan.argtypes = argtypes; |
1273 | plan.parserSetup = NULL; |
1274 | plan.parserSetupArg = NULL; |
1275 | |
1276 | /* build transient ParamListInfo in executor context */ |
1277 | paramLI = _SPI_convert_params(nargs, argtypes, |
1278 | Values, Nulls); |
1279 | |
1280 | _SPI_prepare_plan(src, &plan); |
1281 | |
1282 | /* We needn't copy the plan; SPI_cursor_open_internal will do so */ |
1283 | |
1284 | result = SPI_cursor_open_internal(name, &plan, paramLI, read_only); |
1285 | |
1286 | /* And clean up */ |
1287 | _SPI_end_call(true); |
1288 | |
1289 | return result; |
1290 | } |
1291 | |
1292 | |
1293 | /* |
1294 | * SPI_cursor_open_with_paramlist() |
1295 | * |
1296 | * Same as SPI_cursor_open except that parameters (if any) are passed |
1297 | * as a ParamListInfo, which supports dynamic parameter set determination |
1298 | */ |
1299 | Portal |
1300 | SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan, |
1301 | ParamListInfo params, bool read_only) |
1302 | { |
1303 | return SPI_cursor_open_internal(name, plan, params, read_only); |
1304 | } |
1305 | |
1306 | |
1307 | /* |
1308 | * SPI_cursor_open_internal() |
1309 | * |
1310 | * Common code for SPI_cursor_open variants |
1311 | */ |
1312 | static Portal |
1313 | SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, |
1314 | ParamListInfo paramLI, bool read_only) |
1315 | { |
1316 | CachedPlanSource *plansource; |
1317 | CachedPlan *cplan; |
1318 | List *stmt_list; |
1319 | char *query_string; |
1320 | Snapshot snapshot; |
1321 | MemoryContext oldcontext; |
1322 | Portal portal; |
1323 | ErrorContextCallback spierrcontext; |
1324 | |
1325 | /* |
1326 | * Check that the plan is something the Portal code will special-case as |
1327 | * returning one tupleset. |
1328 | */ |
1329 | if (!SPI_is_cursor_plan(plan)) |
1330 | { |
1331 | /* try to give a good error message */ |
1332 | if (list_length(plan->plancache_list) != 1) |
1333 | ereport(ERROR, |
1334 | (errcode(ERRCODE_INVALID_CURSOR_DEFINITION), |
1335 | errmsg("cannot open multi-query plan as cursor" ))); |
1336 | plansource = (CachedPlanSource *) linitial(plan->plancache_list); |
1337 | ereport(ERROR, |
1338 | (errcode(ERRCODE_INVALID_CURSOR_DEFINITION), |
1339 | /* translator: %s is name of a SQL command, eg INSERT */ |
1340 | errmsg("cannot open %s query as cursor" , |
1341 | plansource->commandTag))); |
1342 | } |
1343 | |
1344 | Assert(list_length(plan->plancache_list) == 1); |
1345 | plansource = (CachedPlanSource *) linitial(plan->plancache_list); |
1346 | |
1347 | /* Push the SPI stack */ |
1348 | if (_SPI_begin_call(true) < 0) |
1349 | elog(ERROR, "SPI_cursor_open called while not connected" ); |
1350 | |
1351 | /* Reset SPI result (note we deliberately don't touch lastoid) */ |
1352 | SPI_processed = 0; |
1353 | SPI_tuptable = NULL; |
1354 | _SPI_current->processed = 0; |
1355 | _SPI_current->tuptable = NULL; |
1356 | |
1357 | /* Create the portal */ |
1358 | if (name == NULL || name[0] == '\0') |
1359 | { |
1360 | /* Use a random nonconflicting name */ |
1361 | portal = CreateNewPortal(); |
1362 | } |
1363 | else |
1364 | { |
1365 | /* In this path, error if portal of same name already exists */ |
1366 | portal = CreatePortal(name, false, false); |
1367 | } |
1368 | |
1369 | /* Copy the plan's query string into the portal */ |
1370 | query_string = MemoryContextStrdup(portal->portalContext, |
1371 | plansource->query_string); |
1372 | |
1373 | /* |
1374 | * Setup error traceback support for ereport(), in case GetCachedPlan |
1375 | * throws an error. |
1376 | */ |
1377 | spierrcontext.callback = _SPI_error_callback; |
1378 | spierrcontext.arg = unconstify(char *, plansource->query_string); |
1379 | spierrcontext.previous = error_context_stack; |
1380 | error_context_stack = &spierrcontext; |
1381 | |
1382 | /* |
1383 | * Note: for a saved plan, we mustn't have any failure occur between |
1384 | * GetCachedPlan and PortalDefineQuery; that would result in leaking our |
1385 | * plancache refcount. |
1386 | */ |
1387 | |
1388 | /* Replan if needed, and increment plan refcount for portal */ |
1389 | cplan = GetCachedPlan(plansource, paramLI, false, _SPI_current->queryEnv); |
1390 | stmt_list = cplan->stmt_list; |
1391 | |
1392 | if (!plan->saved) |
1393 | { |
1394 | /* |
1395 | * We don't want the portal to depend on an unsaved CachedPlanSource, |
1396 | * so must copy the plan into the portal's context. An error here |
1397 | * will result in leaking our refcount on the plan, but it doesn't |
1398 | * matter because the plan is unsaved and hence transient anyway. |
1399 | */ |
1400 | oldcontext = MemoryContextSwitchTo(portal->portalContext); |
1401 | stmt_list = copyObject(stmt_list); |
1402 | MemoryContextSwitchTo(oldcontext); |
1403 | ReleaseCachedPlan(cplan, false); |
1404 | cplan = NULL; /* portal shouldn't depend on cplan */ |
1405 | } |
1406 | |
1407 | /* |
1408 | * Set up the portal. |
1409 | */ |
1410 | PortalDefineQuery(portal, |
1411 | NULL, /* no statement name */ |
1412 | query_string, |
1413 | plansource->commandTag, |
1414 | stmt_list, |
1415 | cplan); |
1416 | |
1417 | /* |
1418 | * Set up options for portal. Default SCROLL type is chosen the same way |
1419 | * as PerformCursorOpen does it. |
1420 | */ |
1421 | portal->cursorOptions = plan->cursor_options; |
1422 | if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL))) |
1423 | { |
1424 | if (list_length(stmt_list) == 1 && |
1425 | linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY && |
1426 | linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL && |
1427 | ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree)) |
1428 | portal->cursorOptions |= CURSOR_OPT_SCROLL; |
1429 | else |
1430 | portal->cursorOptions |= CURSOR_OPT_NO_SCROLL; |
1431 | } |
1432 | |
1433 | /* |
1434 | * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the |
1435 | * check in transformDeclareCursorStmt because the cursor options might |
1436 | * not have come through there. |
1437 | */ |
1438 | if (portal->cursorOptions & CURSOR_OPT_SCROLL) |
1439 | { |
1440 | if (list_length(stmt_list) == 1 && |
1441 | linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY && |
1442 | linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL) |
1443 | ereport(ERROR, |
1444 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
1445 | errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported" ), |
1446 | errdetail("Scrollable cursors must be READ ONLY." ))); |
1447 | } |
1448 | |
1449 | /* Make current query environment available to portal at execution time. */ |
1450 | portal->queryEnv = _SPI_current->queryEnv; |
1451 | |
1452 | /* |
1453 | * If told to be read-only, or in parallel mode, verify that this query is |
1454 | * in fact read-only. This can't be done earlier because we need to look |
1455 | * at the finished, planned queries. (In particular, we don't want to do |
1456 | * it between GetCachedPlan and PortalDefineQuery, because throwing an |
1457 | * error between those steps would result in leaking our plancache |
1458 | * refcount.) |
1459 | */ |
1460 | if (read_only || IsInParallelMode()) |
1461 | { |
1462 | ListCell *lc; |
1463 | |
1464 | foreach(lc, stmt_list) |
1465 | { |
1466 | PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc); |
1467 | |
1468 | if (!CommandIsReadOnly(pstmt)) |
1469 | { |
1470 | if (read_only) |
1471 | ereport(ERROR, |
1472 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
1473 | /* translator: %s is a SQL statement name */ |
1474 | errmsg("%s is not allowed in a non-volatile function" , |
1475 | CreateCommandTag((Node *) pstmt)))); |
1476 | else |
1477 | PreventCommandIfParallelMode(CreateCommandTag((Node *) pstmt)); |
1478 | } |
1479 | } |
1480 | } |
1481 | |
1482 | /* Set up the snapshot to use. */ |
1483 | if (read_only) |
1484 | snapshot = GetActiveSnapshot(); |
1485 | else |
1486 | { |
1487 | CommandCounterIncrement(); |
1488 | snapshot = GetTransactionSnapshot(); |
1489 | } |
1490 | |
1491 | /* |
1492 | * If the plan has parameters, copy them into the portal. Note that this |
1493 | * must be done after revalidating the plan, because in dynamic parameter |
1494 | * cases the set of parameters could have changed during re-parsing. |
1495 | */ |
1496 | if (paramLI) |
1497 | { |
1498 | oldcontext = MemoryContextSwitchTo(portal->portalContext); |
1499 | paramLI = copyParamList(paramLI); |
1500 | MemoryContextSwitchTo(oldcontext); |
1501 | } |
1502 | |
1503 | /* |
1504 | * Start portal execution. |
1505 | */ |
1506 | PortalStart(portal, paramLI, 0, snapshot); |
1507 | |
1508 | Assert(portal->strategy != PORTAL_MULTI_QUERY); |
1509 | |
1510 | /* Pop the error context stack */ |
1511 | error_context_stack = spierrcontext.previous; |
1512 | |
1513 | /* Pop the SPI stack */ |
1514 | _SPI_end_call(true); |
1515 | |
1516 | /* Return the created portal */ |
1517 | return portal; |
1518 | } |
1519 | |
1520 | |
1521 | /* |
1522 | * SPI_cursor_find() |
1523 | * |
1524 | * Find the portal of an existing open cursor |
1525 | */ |
1526 | Portal |
1527 | SPI_cursor_find(const char *name) |
1528 | { |
1529 | return GetPortalByName(name); |
1530 | } |
1531 | |
1532 | |
1533 | /* |
1534 | * SPI_cursor_fetch() |
1535 | * |
1536 | * Fetch rows in a cursor |
1537 | */ |
1538 | void |
1539 | SPI_cursor_fetch(Portal portal, bool forward, long count) |
1540 | { |
1541 | _SPI_cursor_operation(portal, |
1542 | forward ? FETCH_FORWARD : FETCH_BACKWARD, count, |
1543 | CreateDestReceiver(DestSPI)); |
1544 | /* we know that the DestSPI receiver doesn't need a destroy call */ |
1545 | } |
1546 | |
1547 | |
1548 | /* |
1549 | * SPI_cursor_move() |
1550 | * |
1551 | * Move in a cursor |
1552 | */ |
1553 | void |
1554 | SPI_cursor_move(Portal portal, bool forward, long count) |
1555 | { |
1556 | _SPI_cursor_operation(portal, |
1557 | forward ? FETCH_FORWARD : FETCH_BACKWARD, count, |
1558 | None_Receiver); |
1559 | } |
1560 | |
1561 | |
1562 | /* |
1563 | * SPI_scroll_cursor_fetch() |
1564 | * |
1565 | * Fetch rows in a scrollable cursor |
1566 | */ |
1567 | void |
1568 | SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count) |
1569 | { |
1570 | _SPI_cursor_operation(portal, |
1571 | direction, count, |
1572 | CreateDestReceiver(DestSPI)); |
1573 | /* we know that the DestSPI receiver doesn't need a destroy call */ |
1574 | } |
1575 | |
1576 | |
1577 | /* |
1578 | * SPI_scroll_cursor_move() |
1579 | * |
1580 | * Move in a scrollable cursor |
1581 | */ |
1582 | void |
1583 | SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count) |
1584 | { |
1585 | _SPI_cursor_operation(portal, direction, count, None_Receiver); |
1586 | } |
1587 | |
1588 | |
1589 | /* |
1590 | * SPI_cursor_close() |
1591 | * |
1592 | * Close a cursor |
1593 | */ |
1594 | void |
1595 | SPI_cursor_close(Portal portal) |
1596 | { |
1597 | if (!PortalIsValid(portal)) |
1598 | elog(ERROR, "invalid portal in SPI cursor operation" ); |
1599 | |
1600 | PortalDrop(portal, false); |
1601 | } |
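/*
 * Illustrative sketch of the cursor API (table and column hypothetical):
 * open a portal, fetch it in batches, and close it.  Each SPI_cursor_fetch
 * leaves its batch in SPI_tuptable and its row count in SPI_processed.
 *
 *		Portal		portal;
 *		Oid			argtypes[1] = {INT4OID};
 *		Datum		values[1];
 *
 *		values[0] = Int32GetDatum(0);
 *		portal = SPI_cursor_open_with_args(NULL,
 *										   "SELECT * FROM mytable WHERE id > $1",
 *										   1, argtypes, values, NULL,
 *										   true, 0);
 *		for (;;)
 *		{
 *			SPI_cursor_fetch(portal, true, 100);
 *			if (SPI_processed == 0)
 *				break;
 *			... process SPI_tuptable ...
 *			SPI_freetuptable(SPI_tuptable);
 *		}
 *		SPI_cursor_close(portal);
 */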
1602 | |
1603 | /* |
1604 | * Returns the Oid representing the type id for argument at argIndex. First |
1605 | * parameter is at index zero. |
1606 | */ |
1607 | Oid |
1608 | SPI_getargtypeid(SPIPlanPtr plan, int argIndex) |
1609 | { |
1610 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || |
1611 | argIndex < 0 || argIndex >= plan->nargs) |
1612 | { |
1613 | SPI_result = SPI_ERROR_ARGUMENT; |
1614 | return InvalidOid; |
1615 | } |
1616 | return plan->argtypes[argIndex]; |
1617 | } |
1618 | |
1619 | /* |
1620 | * Returns the number of arguments for the prepared plan. |
1621 | */ |
1622 | int |
1623 | SPI_getargcount(SPIPlanPtr plan) |
1624 | { |
1625 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC) |
1626 | { |
1627 | SPI_result = SPI_ERROR_ARGUMENT; |
1628 | return -1; |
1629 | } |
1630 | return plan->nargs; |
1631 | } |
1632 | |
1633 | /* |
1634 | * Returns true if the plan contains exactly one command |
1635 | * and that command returns tuples to the caller (eg, SELECT or |
1636 | * INSERT ... RETURNING, but not SELECT ... INTO). In essence, |
1637 | * the result indicates if the command can be used with SPI_cursor_open |
1638 | * |
1639 | * Parameters |
1640 | * plan: A plan previously prepared using SPI_prepare |
1641 | */ |
1642 | bool |
1643 | SPI_is_cursor_plan(SPIPlanPtr plan) |
1644 | { |
1645 | CachedPlanSource *plansource; |
1646 | |
1647 | if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC) |
1648 | { |
1649 | SPI_result = SPI_ERROR_ARGUMENT; |
1650 | return false; |
1651 | } |
1652 | |
1653 | if (list_length(plan->plancache_list) != 1) |
1654 | { |
1655 | SPI_result = 0; |
1656 | return false; /* not exactly 1 pre-rewrite command */ |
1657 | } |
1658 | plansource = (CachedPlanSource *) linitial(plan->plancache_list); |
1659 | |
1660 | /* |
1661 | * We used to force revalidation of the cached plan here, but that seems |
1662 | * unnecessary: invalidation could mean a change in the rowtype of the |
1663 | * tuples returned by a plan, but not whether it returns tuples at all. |
1664 | */ |
1665 | SPI_result = 0; |
1666 | |
1667 | /* Does it return tuples? */ |
1668 | if (plansource->resultDesc) |
1669 | return true; |
1670 | |
1671 | return false; |
1672 | } |
1673 | |
1674 | /* |
1675 | * SPI_plan_is_valid --- test whether a SPI plan is currently valid |
1676 | * (that is, not marked as being in need of revalidation). |
1677 | * |
1678 | * See notes for CachedPlanIsValid before using this. |
1679 | */ |
1680 | bool |
1681 | SPI_plan_is_valid(SPIPlanPtr plan) |
1682 | { |
1683 | ListCell *lc; |
1684 | |
1685 | Assert(plan->magic == _SPI_PLAN_MAGIC); |
1686 | |
1687 | foreach(lc, plan->plancache_list) |
1688 | { |
1689 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc); |
1690 | |
1691 | if (!CachedPlanIsValid(plansource)) |
1692 | return false; |
1693 | } |
1694 | return true; |
1695 | } |
1696 | |
1697 | /* |
1698 | * SPI_result_code_string --- convert any SPI return code to a string |
1699 | * |
1700 | * This is often useful in error messages. Most callers will probably |
1701 | * only pass negative (error-case) codes, but for generality we recognize |
1702 | * the success codes too. |
1703 | */ |
1704 | const char * |
1705 | SPI_result_code_string(int code) |
1706 | { |
1707 | static char buf[64]; |
1708 | |
1709 | switch (code) |
1710 | { |
1711 | case SPI_ERROR_CONNECT: |
1712 | return "SPI_ERROR_CONNECT" ; |
1713 | case SPI_ERROR_COPY: |
1714 | return "SPI_ERROR_COPY" ; |
1715 | case SPI_ERROR_OPUNKNOWN: |
1716 | return "SPI_ERROR_OPUNKNOWN" ; |
1717 | case SPI_ERROR_UNCONNECTED: |
1718 | return "SPI_ERROR_UNCONNECTED" ; |
1719 | case SPI_ERROR_ARGUMENT: |
1720 | return "SPI_ERROR_ARGUMENT" ; |
1721 | case SPI_ERROR_PARAM: |
1722 | return "SPI_ERROR_PARAM" ; |
1723 | case SPI_ERROR_TRANSACTION: |
1724 | return "SPI_ERROR_TRANSACTION" ; |
1725 | case SPI_ERROR_NOATTRIBUTE: |
1726 | return "SPI_ERROR_NOATTRIBUTE" ; |
1727 | case SPI_ERROR_NOOUTFUNC: |
1728 | return "SPI_ERROR_NOOUTFUNC" ; |
1729 | case SPI_ERROR_TYPUNKNOWN: |
1730 | return "SPI_ERROR_TYPUNKNOWN" ; |
1731 | case SPI_ERROR_REL_DUPLICATE: |
1732 | return "SPI_ERROR_REL_DUPLICATE" ; |
1733 | case SPI_ERROR_REL_NOT_FOUND: |
1734 | return "SPI_ERROR_REL_NOT_FOUND" ; |
1735 | case SPI_OK_CONNECT: |
1736 | return "SPI_OK_CONNECT" ; |
1737 | case SPI_OK_FINISH: |
1738 | return "SPI_OK_FINISH" ; |
1739 | case SPI_OK_FETCH: |
1740 | return "SPI_OK_FETCH" ; |
1741 | case SPI_OK_UTILITY: |
1742 | return "SPI_OK_UTILITY" ; |
1743 | case SPI_OK_SELECT: |
1744 | return "SPI_OK_SELECT" ; |
1745 | case SPI_OK_SELINTO: |
1746 | return "SPI_OK_SELINTO" ; |
1747 | case SPI_OK_INSERT: |
1748 | return "SPI_OK_INSERT" ; |
1749 | case SPI_OK_DELETE: |
1750 | return "SPI_OK_DELETE" ; |
1751 | case SPI_OK_UPDATE: |
1752 | return "SPI_OK_UPDATE" ; |
1753 | case SPI_OK_CURSOR: |
1754 | return "SPI_OK_CURSOR" ; |
1755 | case SPI_OK_INSERT_RETURNING: |
1756 | return "SPI_OK_INSERT_RETURNING" ; |
1757 | case SPI_OK_DELETE_RETURNING: |
1758 | return "SPI_OK_DELETE_RETURNING" ; |
1759 | case SPI_OK_UPDATE_RETURNING: |
1760 | return "SPI_OK_UPDATE_RETURNING" ; |
1761 | case SPI_OK_REWRITTEN: |
1762 | return "SPI_OK_REWRITTEN" ; |
1763 | case SPI_OK_REL_REGISTER: |
1764 | return "SPI_OK_REL_REGISTER" ; |
1765 | case SPI_OK_REL_UNREGISTER: |
1766 | return "SPI_OK_REL_UNREGISTER" ; |
1767 | } |
1768 | /* Unrecognized code ... return something useful ... */ |
1769 | sprintf(buf, "Unrecognized SPI code %d", code); |
1770 | return buf; |
1771 | } |
1772 | |
1773 | /* |
1774 | * SPI_plan_get_plan_sources --- get a SPI plan's underlying list of |
1775 | * CachedPlanSources. |
1776 | * |
1777 | * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL |
1778 | * look directly into the SPIPlan for itself). It's not documented in |
1779 | * spi.sgml because we'd just as soon not have too many places using this. |
1780 | */ |
1781 | List * |
1782 | SPI_plan_get_plan_sources(SPIPlanPtr plan) |
1783 | { |
1784 | Assert(plan->magic == _SPI_PLAN_MAGIC); |
1785 | return plan->plancache_list; |
1786 | } |
1787 | |
1788 | /* |
1789 | * SPI_plan_get_cached_plan --- get a SPI plan's generic CachedPlan, |
1790 | * if the SPI plan contains exactly one CachedPlanSource. If not, |
1791 | * return NULL. Caller is responsible for doing ReleaseCachedPlan(). |
1792 | * |
1793 | * This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL |
1794 | * look directly into the SPIPlan for itself). It's not documented in |
1795 | * spi.sgml because we'd just as soon not have too many places using this. |
1796 | */ |
1797 | CachedPlan * |
1798 | SPI_plan_get_cached_plan(SPIPlanPtr plan) |
1799 | { |
1800 | CachedPlanSource *plansource; |
1801 | CachedPlan *cplan; |
1802 | ErrorContextCallback spierrcontext; |
1803 | |
1804 | Assert(plan->magic == _SPI_PLAN_MAGIC); |
1805 | |
1806 | /* Can't support one-shot plans here */ |
1807 | if (plan->oneshot) |
1808 | return NULL; |
1809 | |
1810 | /* Must have exactly one CachedPlanSource */ |
1811 | if (list_length(plan->plancache_list) != 1) |
1812 | return NULL; |
1813 | plansource = (CachedPlanSource *) linitial(plan->plancache_list); |
1814 | |
1815 | /* Setup error traceback support for ereport() */ |
1816 | spierrcontext.callback = _SPI_error_callback; |
1817 | spierrcontext.arg = unconstify(char *, plansource->query_string); |
1818 | spierrcontext.previous = error_context_stack; |
1819 | error_context_stack = &spierrcontext; |
1820 | |
1821 | /* Get the generic plan for the query */ |
1822 | cplan = GetCachedPlan(plansource, NULL, plan->saved, |
1823 | _SPI_current->queryEnv); |
1824 | Assert(cplan == plansource->gplan); |
1825 | |
1826 | /* Pop the error context stack */ |
1827 | error_context_stack = spierrcontext.previous; |
1828 | |
1829 | return cplan; |
1830 | } |
1831 | |
1832 | |
1833 | /* =================== private functions =================== */ |
1834 | |
1835 | /* |
1836 | * spi_dest_startup |
1837 | * Initialize to receive tuples from Executor into SPITupleTable |
1838 | * of current SPI procedure |
1839 | */ |
1840 | void |
1841 | spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo) |
1842 | { |
1843 | SPITupleTable *tuptable; |
1844 | MemoryContext oldcxt; |
1845 | MemoryContext tuptabcxt; |
1846 | |
1847 | if (_SPI_current == NULL) |
1848 | elog(ERROR, "spi_dest_startup called while not connected to SPI" ); |
1849 | |
1850 | if (_SPI_current->tuptable != NULL) |
1851 | elog(ERROR, "improper call to spi_dest_startup" ); |
1852 | |
1853 | /* We create the tuple table context as a child of procCxt */ |
1854 | |
1855 | oldcxt = _SPI_procmem(); /* switch to procedure memory context */ |
1856 | |
	tuptabcxt = AllocSetContextCreate(CurrentMemoryContext,
									  "SPI TupTable",
									  ALLOCSET_DEFAULT_SIZES);
1860 | MemoryContextSwitchTo(tuptabcxt); |
1861 | |
1862 | _SPI_current->tuptable = tuptable = (SPITupleTable *) |
1863 | palloc0(sizeof(SPITupleTable)); |
1864 | tuptable->tuptabcxt = tuptabcxt; |
1865 | tuptable->subid = GetCurrentSubTransactionId(); |
1866 | |
1867 | /* |
1868 | * The tuptable is now valid enough to be freed by AtEOSubXact_SPI, so put |
1869 | * it onto the SPI context's tuptables list. This will ensure it's not |
1870 | * leaked even in the unlikely event the following few lines fail. |
1871 | */ |
1872 | slist_push_head(&_SPI_current->tuptables, &tuptable->next); |
1873 | |
1874 | /* set up initial allocations */ |
1875 | tuptable->alloced = tuptable->free = 128; |
1876 | tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); |
1877 | tuptable->tupdesc = CreateTupleDescCopy(typeinfo); |
1878 | |
1879 | MemoryContextSwitchTo(oldcxt); |
1880 | } |
1881 | |
1882 | /* |
1883 | * spi_printtup |
1884 | * store tuple retrieved by Executor into SPITupleTable |
1885 | * of current SPI procedure |
1886 | */ |
1887 | bool |
1888 | spi_printtup(TupleTableSlot *slot, DestReceiver *self) |
1889 | { |
1890 | SPITupleTable *tuptable; |
1891 | MemoryContext oldcxt; |
1892 | |
1893 | if (_SPI_current == NULL) |
1894 | elog(ERROR, "spi_printtup called while not connected to SPI" ); |
1895 | |
1896 | tuptable = _SPI_current->tuptable; |
1897 | if (tuptable == NULL) |
1898 | elog(ERROR, "improper call to spi_printtup" ); |
1899 | |
1900 | oldcxt = MemoryContextSwitchTo(tuptable->tuptabcxt); |
1901 | |
1902 | if (tuptable->free == 0) |
1903 | { |
1904 | /* Double the size of the pointer array */ |
1905 | tuptable->free = tuptable->alloced; |
1906 | tuptable->alloced += tuptable->free; |
1907 | tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals, |
1908 | tuptable->alloced * sizeof(HeapTuple)); |
1909 | } |
1910 | |
1911 | tuptable->vals[tuptable->alloced - tuptable->free] = |
1912 | ExecCopySlotHeapTuple(slot); |
1913 | (tuptable->free)--; |
1914 | |
1915 | MemoryContextSwitchTo(oldcxt); |
1916 | |
1917 | return true; |
1918 | } |
1919 | |
1920 | /* |
1921 | * Static functions |
1922 | */ |
1923 | |
1924 | /* |
1925 | * Parse and analyze a querystring. |
1926 | * |
1927 | * At entry, plan->argtypes and plan->nargs (or alternatively plan->parserSetup |
1928 | * and plan->parserSetupArg) must be valid, as must plan->cursor_options. |
1929 | * |
1930 | * Results are stored into *plan (specifically, plan->plancache_list). |
1931 | * Note that the result data is all in CurrentMemoryContext or child contexts |
1932 | * thereof; in practice this means it is in the SPI executor context, and |
1933 | * what we are creating is a "temporary" SPIPlan. Cruft generated during |
1934 | * parsing is also left in CurrentMemoryContext. |
1935 | */ |
1936 | static void |
1937 | _SPI_prepare_plan(const char *src, SPIPlanPtr plan) |
1938 | { |
1939 | List *raw_parsetree_list; |
1940 | List *plancache_list; |
1941 | ListCell *list_item; |
1942 | ErrorContextCallback spierrcontext; |
1943 | |
1944 | /* |
1945 | * Setup error traceback support for ereport() |
1946 | */ |
1947 | spierrcontext.callback = _SPI_error_callback; |
1948 | spierrcontext.arg = unconstify(char *, src); |
1949 | spierrcontext.previous = error_context_stack; |
1950 | error_context_stack = &spierrcontext; |
1951 | |
1952 | /* |
1953 | * Parse the request string into a list of raw parse trees. |
1954 | */ |
1955 | raw_parsetree_list = pg_parse_query(src); |
1956 | |
1957 | /* |
1958 | * Do parse analysis and rule rewrite for each raw parsetree, storing the |
1959 | * results into unsaved plancache entries. |
1960 | */ |
1961 | plancache_list = NIL; |
1962 | |
1963 | foreach(list_item, raw_parsetree_list) |
1964 | { |
1965 | RawStmt *parsetree = lfirst_node(RawStmt, list_item); |
1966 | List *stmt_list; |
1967 | CachedPlanSource *plansource; |
1968 | |
1969 | /* |
1970 | * Create the CachedPlanSource before we do parse analysis, since it |
1971 | * needs to see the unmodified raw parse tree. |
1972 | */ |
1973 | plansource = CreateCachedPlan(parsetree, |
1974 | src, |
1975 | CreateCommandTag(parsetree->stmt)); |
1976 | |
1977 | /* |
1978 | * Parameter datatypes are driven by parserSetup hook if provided, |
1979 | * otherwise we use the fixed parameter list. |
1980 | */ |
1981 | if (plan->parserSetup != NULL) |
1982 | { |
1983 | Assert(plan->nargs == 0); |
1984 | stmt_list = pg_analyze_and_rewrite_params(parsetree, |
1985 | src, |
1986 | plan->parserSetup, |
1987 | plan->parserSetupArg, |
1988 | _SPI_current->queryEnv); |
1989 | } |
1990 | else |
1991 | { |
1992 | stmt_list = pg_analyze_and_rewrite(parsetree, |
1993 | src, |
1994 | plan->argtypes, |
1995 | plan->nargs, |
1996 | _SPI_current->queryEnv); |
1997 | } |
1998 | |
1999 | /* Finish filling in the CachedPlanSource */ |
2000 | CompleteCachedPlan(plansource, |
2001 | stmt_list, |
2002 | NULL, |
2003 | plan->argtypes, |
2004 | plan->nargs, |
2005 | plan->parserSetup, |
2006 | plan->parserSetupArg, |
2007 | plan->cursor_options, |
2008 | false); /* not fixed result */ |
2009 | |
2010 | plancache_list = lappend(plancache_list, plansource); |
2011 | } |
2012 | |
2013 | plan->plancache_list = plancache_list; |
2014 | plan->oneshot = false; |
2015 | |
2016 | /* |
2017 | * Pop the error context stack |
2018 | */ |
2019 | error_context_stack = spierrcontext.previous; |
2020 | } |
2021 | |
2022 | /* |
2023 | * Parse, but don't analyze, a querystring. |
2024 | * |
2025 | * This is a stripped-down version of _SPI_prepare_plan that only does the |
2026 | * initial raw parsing. It creates "one shot" CachedPlanSources |
2027 | * that still require parse analysis before execution is possible. |
2028 | * |
2029 | * The advantage of using the "one shot" form of CachedPlanSource is that |
2030 | * we eliminate data copying and invalidation overhead. Postponing parse |
2031 | * analysis also prevents issues if some of the raw parsetrees are DDL |
2032 | * commands that affect validity of later parsetrees. Both of these |
2033 | * attributes are good things for SPI_execute() and similar cases. |
2034 | * |
2035 | * Results are stored into *plan (specifically, plan->plancache_list). |
2036 | * Note that the result data is all in CurrentMemoryContext or child contexts |
2037 | * thereof; in practice this means it is in the SPI executor context, and |
2038 | * what we are creating is a "temporary" SPIPlan. Cruft generated during |
2039 | * parsing is also left in CurrentMemoryContext. |
2040 | */ |
2041 | static void |
2042 | _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan) |
2043 | { |
2044 | List *raw_parsetree_list; |
2045 | List *plancache_list; |
2046 | ListCell *list_item; |
2047 | ErrorContextCallback spierrcontext; |
2048 | |
2049 | /* |
2050 | * Setup error traceback support for ereport() |
2051 | */ |
2052 | spierrcontext.callback = _SPI_error_callback; |
2053 | spierrcontext.arg = unconstify(char *, src); |
2054 | spierrcontext.previous = error_context_stack; |
2055 | error_context_stack = &spierrcontext; |
2056 | |
2057 | /* |
2058 | * Parse the request string into a list of raw parse trees. |
2059 | */ |
2060 | raw_parsetree_list = pg_parse_query(src); |
2061 | |
2062 | /* |
2063 | * Construct plancache entries, but don't do parse analysis yet. |
2064 | */ |
2065 | plancache_list = NIL; |
2066 | |
2067 | foreach(list_item, raw_parsetree_list) |
2068 | { |
2069 | RawStmt *parsetree = lfirst_node(RawStmt, list_item); |
2070 | CachedPlanSource *plansource; |
2071 | |
2072 | plansource = CreateOneShotCachedPlan(parsetree, |
2073 | src, |
2074 | CreateCommandTag(parsetree->stmt)); |
2075 | |
2076 | plancache_list = lappend(plancache_list, plansource); |
2077 | } |
2078 | |
2079 | plan->plancache_list = plancache_list; |
2080 | plan->oneshot = true; |
2081 | |
2082 | /* |
2083 | * Pop the error context stack |
2084 | */ |
2085 | error_context_stack = spierrcontext.previous; |
2086 | } |
2087 | |
2088 | /* |
2089 | * Execute the given plan with the given parameter values |
2090 | * |
2091 | * snapshot: query snapshot to use, or InvalidSnapshot for the normal |
2092 | * behavior of taking a new snapshot for each query. |
2093 | * crosscheck_snapshot: for RI use, all others pass InvalidSnapshot |
2094 | * read_only: true for read-only execution (no CommandCounterIncrement) |
2095 | * fire_triggers: true to fire AFTER triggers at end of query (normal case); |
2096 | * false means any AFTER triggers are postponed to end of outer query |
2097 | * tcount: execution tuple-count limit, or 0 for none |
2098 | */ |
2099 | static int |
2100 | _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, |
2101 | Snapshot snapshot, Snapshot crosscheck_snapshot, |
2102 | bool read_only, bool fire_triggers, uint64 tcount) |
2103 | { |
2104 | int my_res = 0; |
2105 | uint64 my_processed = 0; |
2106 | SPITupleTable *my_tuptable = NULL; |
2107 | int res = 0; |
2108 | bool pushed_active_snap = false; |
2109 | ErrorContextCallback spierrcontext; |
2110 | CachedPlan *cplan = NULL; |
2111 | ListCell *lc1; |
2112 | |
2113 | /* |
2114 | * Setup error traceback support for ereport() |
2115 | */ |
2116 | spierrcontext.callback = _SPI_error_callback; |
2117 | spierrcontext.arg = NULL; /* we'll fill this below */ |
2118 | spierrcontext.previous = error_context_stack; |
2119 | error_context_stack = &spierrcontext; |
2120 | |
2121 | /* |
2122 | * We support four distinct snapshot management behaviors: |
2123 | * |
2124 | * snapshot != InvalidSnapshot, read_only = true: use exactly the given |
2125 | * snapshot. |
2126 | * |
2127 | * snapshot != InvalidSnapshot, read_only = false: use the given snapshot, |
2128 | * modified by advancing its command ID before each querytree. |
2129 | * |
2130 | * snapshot == InvalidSnapshot, read_only = true: use the entry-time |
2131 | * ActiveSnapshot, if any (if there isn't one, we run with no snapshot). |
2132 | * |
2133 | * snapshot == InvalidSnapshot, read_only = false: take a full new |
2134 | * snapshot for each user command, and advance its command ID before each |
2135 | * querytree within the command. |
2136 | * |
2137 | * In the first two cases, we can just push the snap onto the stack once |
2138 | * for the whole plan list. |
2139 | * |
2140 | * But if the plan has no_snapshots set to true, then don't manage |
2141 | * snapshots at all. The caller should then take care of that. |
2142 | */ |
2143 | if (snapshot != InvalidSnapshot && !plan->no_snapshots) |
2144 | { |
2145 | if (read_only) |
2146 | { |
2147 | PushActiveSnapshot(snapshot); |
2148 | pushed_active_snap = true; |
2149 | } |
2150 | else |
2151 | { |
2152 | /* Make sure we have a private copy of the snapshot to modify */ |
2153 | PushCopiedSnapshot(snapshot); |
2154 | pushed_active_snap = true; |
2155 | } |
2156 | } |
2157 | |
2158 | foreach(lc1, plan->plancache_list) |
2159 | { |
2160 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1); |
2161 | List *stmt_list; |
2162 | ListCell *lc2; |
2163 | |
2164 | spierrcontext.arg = unconstify(char *, plansource->query_string); |
2165 | |
2166 | /* |
2167 | * If this is a one-shot plan, we still need to do parse analysis. |
2168 | */ |
2169 | if (plan->oneshot) |
2170 | { |
2171 | RawStmt *parsetree = plansource->raw_parse_tree; |
2172 | const char *src = plansource->query_string; |
2173 | List *stmt_list; |
2174 | |
2175 | /* |
2176 | * Parameter datatypes are driven by parserSetup hook if provided, |
2177 | * otherwise we use the fixed parameter list. |
2178 | */ |
2179 | if (parsetree == NULL) |
2180 | stmt_list = NIL; |
2181 | else if (plan->parserSetup != NULL) |
2182 | { |
2183 | Assert(plan->nargs == 0); |
2184 | stmt_list = pg_analyze_and_rewrite_params(parsetree, |
2185 | src, |
2186 | plan->parserSetup, |
2187 | plan->parserSetupArg, |
2188 | _SPI_current->queryEnv); |
2189 | } |
2190 | else |
2191 | { |
2192 | stmt_list = pg_analyze_and_rewrite(parsetree, |
2193 | src, |
2194 | plan->argtypes, |
2195 | plan->nargs, |
2196 | _SPI_current->queryEnv); |
2197 | } |
2198 | |
2199 | /* Finish filling in the CachedPlanSource */ |
2200 | CompleteCachedPlan(plansource, |
2201 | stmt_list, |
2202 | NULL, |
2203 | plan->argtypes, |
2204 | plan->nargs, |
2205 | plan->parserSetup, |
2206 | plan->parserSetupArg, |
2207 | plan->cursor_options, |
2208 | false); /* not fixed result */ |
2209 | } |
2210 | |
2211 | /* |
2212 | * Replan if needed, and increment plan refcount. If it's a saved |
2213 | * plan, the refcount must be backed by the CurrentResourceOwner. |
2214 | */ |
2215 | cplan = GetCachedPlan(plansource, paramLI, plan->saved, _SPI_current->queryEnv); |
2216 | stmt_list = cplan->stmt_list; |
2217 | |
2218 | /* |
2219 | * In the default non-read-only case, get a new snapshot, replacing |
2220 | * any that we pushed in a previous cycle. |
2221 | */ |
2222 | if (snapshot == InvalidSnapshot && !read_only && !plan->no_snapshots) |
2223 | { |
2224 | if (pushed_active_snap) |
2225 | PopActiveSnapshot(); |
2226 | PushActiveSnapshot(GetTransactionSnapshot()); |
2227 | pushed_active_snap = true; |
2228 | } |
2229 | |
2230 | foreach(lc2, stmt_list) |
2231 | { |
2232 | PlannedStmt *stmt = lfirst_node(PlannedStmt, lc2); |
2233 | bool canSetTag = stmt->canSetTag; |
2234 | DestReceiver *dest; |
2235 | |
2236 | _SPI_current->processed = 0; |
2237 | _SPI_current->tuptable = NULL; |
2238 | |
2239 | if (stmt->utilityStmt) |
2240 | { |
2241 | if (IsA(stmt->utilityStmt, CopyStmt)) |
2242 | { |
2243 | CopyStmt *cstmt = (CopyStmt *) stmt->utilityStmt; |
2244 | |
2245 | if (cstmt->filename == NULL) |
2246 | { |
2247 | my_res = SPI_ERROR_COPY; |
2248 | goto fail; |
2249 | } |
2250 | } |
2251 | else if (IsA(stmt->utilityStmt, TransactionStmt)) |
2252 | { |
2253 | my_res = SPI_ERROR_TRANSACTION; |
2254 | goto fail; |
2255 | } |
2256 | } |
2257 | |
2258 | if (read_only && !CommandIsReadOnly(stmt)) |
2259 | ereport(ERROR, |
2260 | (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), |
2261 | /* translator: %s is a SQL statement name */ |
2262 | errmsg("%s is not allowed in a non-volatile function" , |
2263 | CreateCommandTag((Node *) stmt)))); |
2264 | |
2265 | if (IsInParallelMode() && !CommandIsReadOnly(stmt)) |
2266 | PreventCommandIfParallelMode(CreateCommandTag((Node *) stmt)); |
2267 | |
2268 | /* |
2269 | * If not read-only mode, advance the command counter before each |
2270 | * command and update the snapshot. |
2271 | */ |
2272 | if (!read_only && !plan->no_snapshots) |
2273 | { |
2274 | CommandCounterIncrement(); |
2275 | UpdateActiveSnapshotCommandId(); |
2276 | } |
2277 | |
2278 | dest = CreateDestReceiver(canSetTag ? DestSPI : DestNone); |
2279 | |
2280 | if (stmt->utilityStmt == NULL) |
2281 | { |
2282 | QueryDesc *qdesc; |
2283 | Snapshot snap; |
2284 | |
2285 | if (ActiveSnapshotSet()) |
2286 | snap = GetActiveSnapshot(); |
2287 | else |
2288 | snap = InvalidSnapshot; |
2289 | |
2290 | qdesc = CreateQueryDesc(stmt, |
2291 | plansource->query_string, |
2292 | snap, crosscheck_snapshot, |
2293 | dest, |
2294 | paramLI, _SPI_current->queryEnv, |
2295 | 0); |
2296 | res = _SPI_pquery(qdesc, fire_triggers, |
2297 | canSetTag ? tcount : 0); |
2298 | FreeQueryDesc(qdesc); |
2299 | } |
2300 | else |
2301 | { |
2302 | char completionTag[COMPLETION_TAG_BUFSIZE]; |
2303 | ProcessUtilityContext context; |
2304 | |
2305 | /* |
2306 | * If the SPI context is atomic, or we are asked to manage |
2307 | * snapshots, then we are in an atomic execution context. |
2308 | * Conversely, to propagate a nonatomic execution context, the |
2309 | * caller must be in a nonatomic SPI context and manage |
2310 | * snapshots itself. |
2311 | */ |
2312 | if (_SPI_current->atomic || !plan->no_snapshots) |
2313 | context = PROCESS_UTILITY_QUERY; |
2314 | else |
2315 | context = PROCESS_UTILITY_QUERY_NONATOMIC; |
2316 | |
2317 | ProcessUtility(stmt, |
2318 | plansource->query_string, |
2319 | context, |
2320 | paramLI, |
2321 | _SPI_current->queryEnv, |
2322 | dest, |
2323 | completionTag); |
2324 | |
2325 | /* Update "processed" if stmt returned tuples */ |
2326 | if (_SPI_current->tuptable) |
2327 | _SPI_current->processed = _SPI_current->tuptable->alloced - |
2328 | _SPI_current->tuptable->free; |
2329 | |
2330 | res = SPI_OK_UTILITY; |
2331 | |
2332 | /* |
2333 | * Some utility statements return a row count, even though the |
2334 | * tuples are not returned to the caller. |
2335 | */ |
2336 | if (IsA(stmt->utilityStmt, CreateTableAsStmt)) |
2337 | { |
2338 | CreateTableAsStmt *ctastmt = (CreateTableAsStmt *) stmt->utilityStmt; |
2339 | |
2340 | if (strncmp(completionTag, "SELECT " , 7) == 0) |
2341 | _SPI_current->processed = |
2342 | pg_strtouint64(completionTag + 7, NULL, 10); |
2343 | else |
2344 | { |
2345 | /* |
2346 | * Must be an IF NOT EXISTS that did nothing, or a |
2347 | * CREATE ... WITH NO DATA. |
2348 | */ |
2349 | Assert(ctastmt->if_not_exists || |
2350 | ctastmt->into->skipData); |
2351 | _SPI_current->processed = 0; |
2352 | } |
2353 | |
2354 | /* |
2355 | * For historical reasons, if CREATE TABLE AS was spelled |
2356 | * as SELECT INTO, return a special return code. |
2357 | */ |
2358 | if (ctastmt->is_select_into) |
2359 | res = SPI_OK_SELINTO; |
2360 | } |
2361 | else if (IsA(stmt->utilityStmt, CopyStmt)) |
2362 | { |
					Assert(strncmp(completionTag, "COPY ", 5) == 0);
2364 | _SPI_current->processed = pg_strtouint64(completionTag + 5, |
2365 | NULL, 10); |
2366 | } |
2367 | } |
2368 | |
2369 | /* |
2370 | * The last canSetTag query sets the status values returned to the |
2371 | * caller. Be careful to free any tuptables not returned, to |
2372 | * avoid intratransaction memory leak. |
2373 | */ |
2374 | if (canSetTag) |
2375 | { |
2376 | my_processed = _SPI_current->processed; |
2377 | SPI_freetuptable(my_tuptable); |
2378 | my_tuptable = _SPI_current->tuptable; |
2379 | my_res = res; |
2380 | } |
2381 | else |
2382 | { |
2383 | SPI_freetuptable(_SPI_current->tuptable); |
2384 | _SPI_current->tuptable = NULL; |
2385 | } |
2386 | /* we know that the receiver doesn't need a destroy call */ |
2387 | if (res < 0) |
2388 | { |
2389 | my_res = res; |
2390 | goto fail; |
2391 | } |
2392 | } |
2393 | |
2394 | /* Done with this plan, so release refcount */ |
2395 | ReleaseCachedPlan(cplan, plan->saved); |
2396 | cplan = NULL; |
2397 | |
2398 | /* |
2399 | * If not read-only mode, advance the command counter after the last |
2400 | * command. This ensures that its effects are visible, in case it was |
2401 | * DDL that would affect the next CachedPlanSource. |
2402 | */ |
2403 | if (!read_only) |
2404 | CommandCounterIncrement(); |
2405 | } |
2406 | |
2407 | fail: |
2408 | |
2409 | /* Pop the snapshot off the stack if we pushed one */ |
2410 | if (pushed_active_snap) |
2411 | PopActiveSnapshot(); |
2412 | |
2413 | /* We no longer need the cached plan refcount, if any */ |
2414 | if (cplan) |
2415 | ReleaseCachedPlan(cplan, plan->saved); |
2416 | |
2417 | /* |
2418 | * Pop the error context stack |
2419 | */ |
2420 | error_context_stack = spierrcontext.previous; |
2421 | |
2422 | /* Save results for caller */ |
2423 | SPI_processed = my_processed; |
2424 | SPI_tuptable = my_tuptable; |
2425 | |
2426 | /* tuptable now is caller's responsibility, not SPI's */ |
2427 | _SPI_current->tuptable = NULL; |
2428 | |
2429 | /* |
	 * If none of the queries had canSetTag, return SPI_OK_REWRITTEN.  Prior
	 * to 8.4, we used to return the last query's result code, but not its
	 * auxiliary results, which was confusing.
2433 | */ |
2434 | if (my_res == 0) |
2435 | my_res = SPI_OK_REWRITTEN; |
2436 | |
2437 | return my_res; |
2438 | } |
2439 | |
2440 | /* |
2441 | * Convert arrays of query parameters to form wanted by planner and executor |
2442 | */ |
2443 | static ParamListInfo |
2444 | _SPI_convert_params(int nargs, Oid *argtypes, |
2445 | Datum *Values, const char *Nulls) |
2446 | { |
2447 | ParamListInfo paramLI; |
2448 | |
2449 | if (nargs > 0) |
2450 | { |
2451 | paramLI = makeParamList(nargs); |
2452 | |
2453 | for (int i = 0; i < nargs; i++) |
2454 | { |
2455 | ParamExternData *prm = ¶mLI->params[i]; |
2456 | |
2457 | prm->value = Values[i]; |
2458 | prm->isnull = (Nulls && Nulls[i] == 'n'); |
2459 | prm->pflags = PARAM_FLAG_CONST; |
2460 | prm->ptype = argtypes[i]; |
2461 | } |
2462 | } |
2463 | else |
2464 | paramLI = NULL; |
2465 | return paramLI; |
2466 | } |
2467 | |
2468 | static int |
2469 | _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount) |
2470 | { |
2471 | int operation = queryDesc->operation; |
2472 | int eflags; |
2473 | int res; |
2474 | |
2475 | switch (operation) |
2476 | { |
2477 | case CMD_SELECT: |
2478 | if (queryDesc->dest->mydest != DestSPI) |
2479 | { |
2480 | /* Don't return SPI_OK_SELECT if we're discarding result */ |
2481 | res = SPI_OK_UTILITY; |
2482 | } |
2483 | else |
2484 | res = SPI_OK_SELECT; |
2485 | break; |
2486 | case CMD_INSERT: |
2487 | if (queryDesc->plannedstmt->hasReturning) |
2488 | res = SPI_OK_INSERT_RETURNING; |
2489 | else |
2490 | res = SPI_OK_INSERT; |
2491 | break; |
2492 | case CMD_DELETE: |
2493 | if (queryDesc->plannedstmt->hasReturning) |
2494 | res = SPI_OK_DELETE_RETURNING; |
2495 | else |
2496 | res = SPI_OK_DELETE; |
2497 | break; |
2498 | case CMD_UPDATE: |
2499 | if (queryDesc->plannedstmt->hasReturning) |
2500 | res = SPI_OK_UPDATE_RETURNING; |
2501 | else |
2502 | res = SPI_OK_UPDATE; |
2503 | break; |
2504 | default: |
2505 | return SPI_ERROR_OPUNKNOWN; |
2506 | } |
2507 | |
2508 | #ifdef SPI_EXECUTOR_STATS |
2509 | if (ShowExecutorStats) |
2510 | ResetUsage(); |
2511 | #endif |
2512 | |
2513 | /* Select execution options */ |
2514 | if (fire_triggers) |
2515 | eflags = 0; /* default run-to-completion flags */ |
2516 | else |
2517 | eflags = EXEC_FLAG_SKIP_TRIGGERS; |
2518 | |
2519 | ExecutorStart(queryDesc, eflags); |
2520 | |
2521 | ExecutorRun(queryDesc, ForwardScanDirection, tcount, true); |
2522 | |
2523 | _SPI_current->processed = queryDesc->estate->es_processed; |
2524 | |
2525 | if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->hasReturning) && |
2526 | queryDesc->dest->mydest == DestSPI) |
2527 | { |
2528 | if (_SPI_checktuples()) |
2529 | elog(ERROR, "consistency check on SPI tuple count failed" ); |
2530 | } |
2531 | |
2532 | ExecutorFinish(queryDesc); |
2533 | ExecutorEnd(queryDesc); |
2534 | /* FreeQueryDesc is done by the caller */ |
2535 | |
2536 | #ifdef SPI_EXECUTOR_STATS |
2537 | if (ShowExecutorStats) |
2538 | ShowUsage("SPI EXECUTOR STATS" ); |
2539 | #endif |
2540 | |
2541 | return res; |
2542 | } |
2543 | |
2544 | /* |
2545 | * _SPI_error_callback |
2546 | * |
2547 | * Add context information when a query invoked via SPI fails |
2548 | */ |
2549 | static void |
2550 | _SPI_error_callback(void *arg) |
2551 | { |
2552 | const char *query = (const char *) arg; |
2553 | int syntaxerrposition; |
2554 | |
2555 | if (query == NULL) /* in case arg wasn't set yet */ |
2556 | return; |
2557 | |
2558 | /* |
2559 | * If there is a syntax error position, convert to internal syntax error; |
2560 | * otherwise treat the query as an item of context stack |
2561 | */ |
2562 | syntaxerrposition = geterrposition(); |
2563 | if (syntaxerrposition > 0) |
2564 | { |
2565 | errposition(0); |
2566 | internalerrposition(syntaxerrposition); |
2567 | internalerrquery(query); |
2568 | } |
2569 | else |
2570 | errcontext("SQL statement \"%s\"" , query); |
2571 | } |
2572 | |
2573 | /* |
2574 | * _SPI_cursor_operation() |
2575 | * |
2576 | * Do a FETCH or MOVE in a cursor |
2577 | */ |
2578 | static void |
2579 | _SPI_cursor_operation(Portal portal, FetchDirection direction, long count, |
2580 | DestReceiver *dest) |
2581 | { |
2582 | uint64 nfetched; |
2583 | |
2584 | /* Check that the portal is valid */ |
2585 | if (!PortalIsValid(portal)) |
2586 | elog(ERROR, "invalid portal in SPI cursor operation" ); |
2587 | |
2588 | /* Push the SPI stack */ |
2589 | if (_SPI_begin_call(true) < 0) |
2590 | elog(ERROR, "SPI cursor operation called while not connected" ); |
2591 | |
2592 | /* Reset the SPI result (note we deliberately don't touch lastoid) */ |
2593 | SPI_processed = 0; |
2594 | SPI_tuptable = NULL; |
2595 | _SPI_current->processed = 0; |
2596 | _SPI_current->tuptable = NULL; |
2597 | |
2598 | /* Run the cursor */ |
2599 | nfetched = PortalRunFetch(portal, |
2600 | direction, |
2601 | count, |
2602 | dest); |
2603 | |
2604 | /* |
2605 | * Think not to combine this store with the preceding function call. If |
	 * the portal contains calls to functions that use SPI, then _SPI_stack is
2607 | * likely to move around while the portal runs. When control returns, |
2608 | * _SPI_current will point to the correct stack entry... but the pointer |
2609 | * may be different than it was beforehand. So we must be sure to re-fetch |
2610 | * the pointer after the function call completes. |
2611 | */ |
2612 | _SPI_current->processed = nfetched; |
2613 | |
2614 | if (dest->mydest == DestSPI && _SPI_checktuples()) |
2615 | elog(ERROR, "consistency check on SPI tuple count failed" ); |
2616 | |
2617 | /* Put the result into place for access by caller */ |
2618 | SPI_processed = _SPI_current->processed; |
2619 | SPI_tuptable = _SPI_current->tuptable; |
2620 | |
2621 | /* tuptable now is caller's responsibility, not SPI's */ |
2622 | _SPI_current->tuptable = NULL; |
2623 | |
2624 | /* Pop the SPI stack */ |
2625 | _SPI_end_call(true); |
2626 | } |
2627 | |
2628 | |
2629 | static MemoryContext |
2630 | _SPI_execmem(void) |
2631 | { |
2632 | return MemoryContextSwitchTo(_SPI_current->execCxt); |
2633 | } |
2634 | |
2635 | static MemoryContext |
2636 | _SPI_procmem(void) |
2637 | { |
2638 | return MemoryContextSwitchTo(_SPI_current->procCxt); |
2639 | } |
2640 | |
2641 | /* |
2642 | * _SPI_begin_call: begin a SPI operation within a connected procedure |
2643 | * |
2644 | * use_exec is true if we intend to make use of the procedure's execCxt |
2645 | * during this SPI operation. We'll switch into that context, and arrange |
2646 | * for it to be cleaned up at _SPI_end_call or if an error occurs. |
2647 | */ |
2648 | static int |
2649 | _SPI_begin_call(bool use_exec) |
2650 | { |
2651 | if (_SPI_current == NULL) |
2652 | return SPI_ERROR_UNCONNECTED; |
2653 | |
2654 | if (use_exec) |
2655 | { |
2656 | /* remember when the Executor operation started */ |
2657 | _SPI_current->execSubid = GetCurrentSubTransactionId(); |
2658 | /* switch to the Executor memory context */ |
2659 | _SPI_execmem(); |
2660 | } |
2661 | |
2662 | return 0; |
2663 | } |
2664 | |
2665 | /* |
2666 | * _SPI_end_call: end a SPI operation within a connected procedure |
2667 | * |
2668 | * use_exec must be the same as in the previous _SPI_begin_call |
2669 | * |
2670 | * Note: this currently has no failure return cases, so callers don't check |
2671 | */ |
2672 | static int |
2673 | _SPI_end_call(bool use_exec) |
2674 | { |
2675 | if (use_exec) |
2676 | { |
2677 | /* switch to the procedure memory context */ |
2678 | _SPI_procmem(); |
2679 | /* mark Executor context no longer in use */ |
2680 | _SPI_current->execSubid = InvalidSubTransactionId; |
2681 | /* and free Executor memory */ |
2682 | MemoryContextResetAndDeleteChildren(_SPI_current->execCxt); |
2683 | } |
2684 | |
2685 | return 0; |
2686 | } |
2687 | |
2688 | static bool |
2689 | _SPI_checktuples(void) |
2690 | { |
2691 | uint64 processed = _SPI_current->processed; |
2692 | SPITupleTable *tuptable = _SPI_current->tuptable; |
2693 | bool failed = false; |
2694 | |
2695 | if (tuptable == NULL) /* spi_dest_startup was not called */ |
2696 | failed = true; |
2697 | else if (processed != (tuptable->alloced - tuptable->free)) |
2698 | failed = true; |
2699 | |
2700 | return failed; |
2701 | } |
2702 | |
2703 | /* |
2704 | * Convert a "temporary" SPIPlan into an "unsaved" plan. |
2705 | * |
2706 | * The passed _SPI_plan struct is on the stack, and all its subsidiary data |
2707 | * is in or under the current SPI executor context. Copy the plan into the |
2708 | * SPI procedure context so it will survive _SPI_end_call(). To minimize |
2709 | * data copying, this destructively modifies the input plan, by taking the |
2710 | * plancache entries away from it and reparenting them to the new SPIPlan. |
2711 | */ |
2712 | static SPIPlanPtr |
2713 | _SPI_make_plan_non_temp(SPIPlanPtr plan) |
2714 | { |
2715 | SPIPlanPtr newplan; |
2716 | MemoryContext parentcxt = _SPI_current->procCxt; |
2717 | MemoryContext plancxt; |
2718 | MemoryContext oldcxt; |
2719 | ListCell *lc; |
2720 | |
2721 | /* Assert the input is a temporary SPIPlan */ |
2722 | Assert(plan->magic == _SPI_PLAN_MAGIC); |
2723 | Assert(plan->plancxt == NULL); |
2724 | /* One-shot plans can't be saved */ |
2725 | Assert(!plan->oneshot); |
2726 | |
2727 | /* |
2728 | * Create a memory context for the plan, underneath the procedure context. |
2729 | * We don't expect the plan to be very large. |
2730 | */ |
	plancxt = AllocSetContextCreate(parentcxt,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
2734 | oldcxt = MemoryContextSwitchTo(plancxt); |
2735 | |
2736 | /* Copy the SPI_plan struct and subsidiary data into the new context */ |
2737 | newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan)); |
2738 | newplan->magic = _SPI_PLAN_MAGIC; |
2739 | newplan->plancxt = plancxt; |
2740 | newplan->cursor_options = plan->cursor_options; |
2741 | newplan->nargs = plan->nargs; |
2742 | if (plan->nargs > 0) |
2743 | { |
2744 | newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid)); |
2745 | memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid)); |
2746 | } |
2747 | else |
2748 | newplan->argtypes = NULL; |
2749 | newplan->parserSetup = plan->parserSetup; |
2750 | newplan->parserSetupArg = plan->parserSetupArg; |
2751 | |
2752 | /* |
2753 | * Reparent all the CachedPlanSources into the procedure context. In |
2754 | * theory this could fail partway through due to the pallocs, but we don't |
2755 | * care too much since both the procedure context and the executor context |
2756 | * would go away on error. |
2757 | */ |
2758 | foreach(lc, plan->plancache_list) |
2759 | { |
2760 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc); |
2761 | |
2762 | CachedPlanSetParentContext(plansource, parentcxt); |
2763 | |
2764 | /* Build new list, with list cells in plancxt */ |
2765 | newplan->plancache_list = lappend(newplan->plancache_list, plansource); |
2766 | } |
2767 | |
2768 | MemoryContextSwitchTo(oldcxt); |
2769 | |
2770 | /* For safety, unlink the CachedPlanSources from the temporary plan */ |
2771 | plan->plancache_list = NIL; |
2772 | |
2773 | return newplan; |
2774 | } |
2775 | |
2776 | /* |
2777 | * Make a "saved" copy of the given plan. |
2778 | */ |
2779 | static SPIPlanPtr |
2780 | _SPI_save_plan(SPIPlanPtr plan) |
2781 | { |
2782 | SPIPlanPtr newplan; |
2783 | MemoryContext plancxt; |
2784 | MemoryContext oldcxt; |
2785 | ListCell *lc; |
2786 | |
2787 | /* One-shot plans can't be saved */ |
2788 | Assert(!plan->oneshot); |
2789 | |
2790 | /* |
2791 | * Create a memory context for the plan. We don't expect the plan to be |
2792 | * very large, so use smaller-than-default alloc parameters. It's a |
2793 | * transient context until we finish copying everything. |
2794 | */ |
	plancxt = AllocSetContextCreate(CurrentMemoryContext,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
2798 | oldcxt = MemoryContextSwitchTo(plancxt); |
2799 | |
2800 | /* Copy the SPI plan into its own context */ |
2801 | newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan)); |
2802 | newplan->magic = _SPI_PLAN_MAGIC; |
2803 | newplan->plancxt = plancxt; |
2804 | newplan->cursor_options = plan->cursor_options; |
2805 | newplan->nargs = plan->nargs; |
2806 | if (plan->nargs > 0) |
2807 | { |
2808 | newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid)); |
2809 | memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid)); |
2810 | } |
2811 | else |
2812 | newplan->argtypes = NULL; |
2813 | newplan->parserSetup = plan->parserSetup; |
2814 | newplan->parserSetupArg = plan->parserSetupArg; |
2815 | |
2816 | /* Copy all the plancache entries */ |
2817 | foreach(lc, plan->plancache_list) |
2818 | { |
2819 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc); |
2820 | CachedPlanSource *newsource; |
2821 | |
2822 | newsource = CopyCachedPlan(plansource); |
2823 | newplan->plancache_list = lappend(newplan->plancache_list, newsource); |
2824 | } |
2825 | |
2826 | MemoryContextSwitchTo(oldcxt); |
2827 | |
2828 | /* |
2829 | * Mark it saved, reparent it under CacheMemoryContext, and mark all the |
2830 | * component CachedPlanSources as saved. This sequence cannot fail |
2831 | * partway through, so there's no risk of long-term memory leakage. |
2832 | */ |
2833 | newplan->saved = true; |
2834 | MemoryContextSetParent(newplan->plancxt, CacheMemoryContext); |
2835 | |
2836 | foreach(lc, newplan->plancache_list) |
2837 | { |
2838 | CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc); |
2839 | |
2840 | SaveCachedPlan(plansource); |
2841 | } |
2842 | |
2843 | return newplan; |
2844 | } |
2845 | |
2846 | /* |
2847 | * Internal lookup of ephemeral named relation by name. |
2848 | */ |
2849 | static EphemeralNamedRelation |
2850 | _SPI_find_ENR_by_name(const char *name) |
2851 | { |
	/* internal static function; any error is a bug in SPI itself */
2853 | Assert(name != NULL); |
2854 | |
2855 | /* fast exit if no tuplestores have been added */ |
2856 | if (_SPI_current->queryEnv == NULL) |
2857 | return NULL; |
2858 | |
2859 | return get_ENR(_SPI_current->queryEnv, name); |
2860 | } |
2861 | |
2862 | /* |
2863 | * Register an ephemeral named relation for use by the planner and executor on |
2864 | * subsequent calls using this SPI connection. |
2865 | */ |
2866 | int |
2867 | SPI_register_relation(EphemeralNamedRelation enr) |
2868 | { |
2869 | EphemeralNamedRelation match; |
2870 | int res; |
2871 | |
2872 | if (enr == NULL || enr->md.name == NULL) |
2873 | return SPI_ERROR_ARGUMENT; |
2874 | |
2875 | res = _SPI_begin_call(false); /* keep current memory context */ |
2876 | if (res < 0) |
2877 | return res; |
2878 | |
2879 | match = _SPI_find_ENR_by_name(enr->md.name); |
2880 | if (match) |
2881 | res = SPI_ERROR_REL_DUPLICATE; |
2882 | else |
2883 | { |
2884 | if (_SPI_current->queryEnv == NULL) |
2885 | _SPI_current->queryEnv = create_queryEnv(); |
2886 | |
2887 | register_ENR(_SPI_current->queryEnv, enr); |
2888 | res = SPI_OK_REL_REGISTER; |
2889 | } |
2890 | |
2891 | _SPI_end_call(false); |
2892 | |
2893 | return res; |
2894 | } |
2895 | |
2896 | /* |
2897 | * Unregister an ephemeral named relation by name. This will probably be a |
2898 | * rarely used function, since SPI_finish will clear it automatically. |
2899 | */ |
2900 | int |
2901 | SPI_unregister_relation(const char *name) |
2902 | { |
2903 | EphemeralNamedRelation match; |
2904 | int res; |
2905 | |
2906 | if (name == NULL) |
2907 | return SPI_ERROR_ARGUMENT; |
2908 | |
2909 | res = _SPI_begin_call(false); /* keep current memory context */ |
2910 | if (res < 0) |
2911 | return res; |
2912 | |
2913 | match = _SPI_find_ENR_by_name(name); |
2914 | if (match) |
2915 | { |
2916 | unregister_ENR(_SPI_current->queryEnv, match->md.name); |
2917 | res = SPI_OK_REL_UNREGISTER; |
2918 | } |
2919 | else |
2920 | res = SPI_ERROR_REL_NOT_FOUND; |
2921 | |
2922 | _SPI_end_call(false); |
2923 | |
2924 | return res; |
2925 | } |
2926 | |
2927 | /* |
2928 | * Register the transient relations from 'tdata' using this SPI connection. |
2929 | * This should be called by PL implementations' trigger handlers after |
2930 | * connecting, in order to make transition tables visible to any queries run |
2931 | * in this connection. |
2932 | */ |
2933 | int |
2934 | SPI_register_trigger_data(TriggerData *tdata) |
2935 | { |
2936 | if (tdata == NULL) |
2937 | return SPI_ERROR_ARGUMENT; |
2938 | |
2939 | if (tdata->tg_newtable) |
2940 | { |
2941 | EphemeralNamedRelation enr = |
2942 | palloc(sizeof(EphemeralNamedRelationData)); |
2943 | int rc; |
2944 | |
2945 | enr->md.name = tdata->tg_trigger->tgnewtable; |
2946 | enr->md.reliddesc = tdata->tg_relation->rd_id; |
2947 | enr->md.tupdesc = NULL; |
2948 | enr->md.enrtype = ENR_NAMED_TUPLESTORE; |
2949 | enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_newtable); |
2950 | enr->reldata = tdata->tg_newtable; |
2951 | rc = SPI_register_relation(enr); |
2952 | if (rc != SPI_OK_REL_REGISTER) |
2953 | return rc; |
2954 | } |
2955 | |
2956 | if (tdata->tg_oldtable) |
2957 | { |
2958 | EphemeralNamedRelation enr = |
2959 | palloc(sizeof(EphemeralNamedRelationData)); |
2960 | int rc; |
2961 | |
2962 | enr->md.name = tdata->tg_trigger->tgoldtable; |
2963 | enr->md.reliddesc = tdata->tg_relation->rd_id; |
2964 | enr->md.tupdesc = NULL; |
2965 | enr->md.enrtype = ENR_NAMED_TUPLESTORE; |
2966 | enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_oldtable); |
2967 | enr->reldata = tdata->tg_oldtable; |
2968 | rc = SPI_register_relation(enr); |
2969 | if (rc != SPI_OK_REL_REGISTER) |
2970 | return rc; |
2971 | } |
2972 | |
2973 | return SPI_OK_TD_REGISTER; |
2974 | } |
2975 | |