1/*-------------------------------------------------------------------------
2 *
3 * heaptuple.c
4 * This file contains heap tuple accessor and mutator routines, as well
5 * as various tuple utilities.
6 *
7 * Some notes about varlenas and this code:
8 *
9 * Before Postgres 8.3 varlenas always had a 4-byte length header, and
10 * therefore always needed 4-byte alignment (at least). This wasted space
11 * for short varlenas, for example CHAR(1) took 5 bytes and could need up to
12 * 3 additional padding bytes for alignment.
13 *
14 * Now, a short varlena (up to 126 data bytes) is reduced to a 1-byte header
15 * and we don't align it. To hide this from datatype-specific functions that
16 * don't want to deal with it, such a datum is considered "toasted" and will
17 * be expanded back to the normal 4-byte-header format by pg_detoast_datum.
18 * (In performance-critical code paths we can use pg_detoast_datum_packed
19 * and the appropriate access macros to avoid that overhead.) Note that this
20 * conversion is performed directly in heap_form_tuple, without invoking
21 * tuptoaster.c.
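 *
 * For example, a datatype function that only needs to read the bytes can
 * accept either format (an illustrative sketch, not code from this file):
 *
 *		text	   *t = PG_GETARG_TEXT_PP(0);	(may still be "packed")
 *		char	   *p = VARDATA_ANY(t);
 *		int			len = VARSIZE_ANY_EXHDR(t);
 *
 * whereas PG_GETARG_TEXT_P() would expand the value back to the 4-byte
 * header format first.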
22 *
23 * This change will break any code that assumes it needn't detoast values
24 * that have been put into a tuple but never sent to disk. Hopefully there
25 * are few such places.
26 *
27 * Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
28 * that's the normal requirement for the untoasted format. But we ignore that
29 * for the 1-byte-header format. This means that the actual start position
30 * of a varlena datum may vary depending on which format it has. To determine
31 * what is stored, we have to require that alignment padding bytes be zero.
32 * (Postgres actually has always zeroed them, but now it's required!) Since
33 * the first byte of a 1-byte-header varlena can never be zero, we can examine
34 * the first byte after the previous datum to tell if it's a pad byte or the
35 * start of a 1-byte-header varlena.
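 *
 * That check is what code walking a tuple uses to choose between the two
 * alignment treatments; roughly (a sketch of the pattern used below in
 * nocachegetattr and heap_deform_tuple):
 *
 *		if (att->attlen == -1)
 *			off = att_align_pointer(off, att->attalign, -1, tp + off);
 *		else
 *			off = att_align_nominal(off, att->attalign);
 *
 * where att_align_pointer() inspects the byte at tp + off to decide whether
 * it is looking at pad bytes or a 1-byte header.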
36 *
37 * Note that while formerly we could rely on the first varlena column of a
38 * system catalog to be at the offset suggested by the C struct for the
39 * catalog, this is now risky: it's only safe if the preceding field is
40 * word-aligned, so that there will never be any padding.
41 *
 * We don't pack varlenas whose attstorage is 'p', since the data type
 * isn't expecting to have to detoast values.  This is used in particular
 * by oidvector and int2vector, which are used in the system catalogs
 * and which we'd still like to refer to via C struct offsets.
46 *
47 *
48 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
49 * Portions Copyright (c) 1994, Regents of the University of California
50 *
51 *
52 * IDENTIFICATION
53 * src/backend/access/common/heaptuple.c
54 *
55 *-------------------------------------------------------------------------
56 */
57
58#include "postgres.h"
59
60#include "access/sysattr.h"
61#include "access/tupdesc_details.h"
62#include "access/tuptoaster.h"
63#include "executor/tuptable.h"
64#include "utils/expandeddatum.h"
65
66
67/* Does att's datatype allow packing into the 1-byte-header varlena format? */
68#define ATT_IS_PACKABLE(att) \
69 ((att)->attlen == -1 && (att)->attstorage != 'p')
70/* Use this if it's already known varlena */
71#define VARLENA_ATT_IS_PACKABLE(att) \
72 ((att)->attstorage != 'p')
73
74
75/* ----------------------------------------------------------------
76 * misc support routines
77 * ----------------------------------------------------------------
78 */
79
80/*
81 * Return the missing value of an attribute, or NULL if there isn't one.
82 */
83Datum
84getmissingattr(TupleDesc tupleDesc,
85 int attnum, bool *isnull)
86{
87 Form_pg_attribute att;
88
89 Assert(attnum <= tupleDesc->natts);
90 Assert(attnum > 0);
91
92 att = TupleDescAttr(tupleDesc, attnum - 1);
93
94 if (att->atthasmissing)
95 {
96 AttrMissing *attrmiss;
97
98 Assert(tupleDesc->constr);
99 Assert(tupleDesc->constr->missing);
100
101 attrmiss = tupleDesc->constr->missing + (attnum - 1);
102
103 if (attrmiss->am_present)
104 {
105 *isnull = false;
106 return attrmiss->am_value;
107 }
108 }
109
110 *isnull = true;
111 return PointerGetDatum(NULL);
112}
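
/*
 * For example, heap_deform_tuple() below falls back on this when a tuple
 * physically stores fewer attributes than its descriptor describes:
 *
 *		for (; attnum < tdesc_natts; attnum++)
 *			values[attnum] = getmissingattr(tupleDesc, attnum + 1,
 *											&isnull[attnum]);
 */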
113
114/*
115 * heap_compute_data_size
116 * Determine size of the data area of a tuple to be constructed
117 */
118Size
119heap_compute_data_size(TupleDesc tupleDesc,
120 Datum *values,
121 bool *isnull)
122{
123 Size data_length = 0;
124 int i;
125 int numberOfAttributes = tupleDesc->natts;
126
127 for (i = 0; i < numberOfAttributes; i++)
128 {
129 Datum val;
130 Form_pg_attribute atti;
131
132 if (isnull[i])
133 continue;
134
135 val = values[i];
136 atti = TupleDescAttr(tupleDesc, i);
137
138 if (ATT_IS_PACKABLE(atti) &&
139 VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
140 {
141 /*
142 * we're anticipating converting to a short varlena header, so
143 * adjust length and don't count any alignment
144 */
145 data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
146 }
147 else if (atti->attlen == -1 &&
148 VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
149 {
150 /*
151 * we want to flatten the expanded value so that the constructed
152 * tuple doesn't depend on it
153 */
154 data_length = att_align_nominal(data_length, atti->attalign);
155 data_length += EOH_get_flat_size(DatumGetEOHP(val));
156 }
157 else
158 {
159 data_length = att_align_datum(data_length, atti->attalign,
160 atti->attlen, val);
161 data_length = att_addlength_datum(data_length, atti->attlen,
162 val);
163 }
164 }
165
166 return data_length;
167}
168
169/*
170 * Per-attribute helper for heap_fill_tuple and other routines building tuples.
171 *
172 * Fill in either a data value or a bit in the null bitmask
173 */
174static inline void
175fill_val(Form_pg_attribute att,
176 bits8 **bit,
177 int *bitmask,
178 char **dataP,
179 uint16 *infomask,
180 Datum datum,
181 bool isnull)
182{
183 Size data_length;
184 char *data = *dataP;
185
186 /*
187 * If we're building a null bitmap, set the appropriate bit for the
188 * current column value here.
189 */
190 if (bit != NULL)
191 {
192 if (*bitmask != HIGHBIT)
193 *bitmask <<= 1;
194 else
195 {
196 *bit += 1;
197 **bit = 0x0;
198 *bitmask = 1;
199 }
200
201 if (isnull)
202 {
203 *infomask |= HEAP_HASNULL;
204 return;
205 }
206
207 **bit |= *bitmask;
208 }
209
210 /*
211 * XXX we use the att_align macros on the pointer value itself, not on an
212 * offset. This is a bit of a hack.
213 */
214 if (att->attbyval)
215 {
216 /* pass-by-value */
217 data = (char *) att_align_nominal(data, att->attalign);
218 store_att_byval(data, datum, att->attlen);
219 data_length = att->attlen;
220 }
221 else if (att->attlen == -1)
222 {
223 /* varlena */
224 Pointer val = DatumGetPointer(datum);
225
226 *infomask |= HEAP_HASVARWIDTH;
227 if (VARATT_IS_EXTERNAL(val))
228 {
229 if (VARATT_IS_EXTERNAL_EXPANDED(val))
230 {
231 /*
232 * we want to flatten the expanded value so that the
233 * constructed tuple doesn't depend on it
234 */
235 ExpandedObjectHeader *eoh = DatumGetEOHP(datum);
236
237 data = (char *) att_align_nominal(data,
238 att->attalign);
239 data_length = EOH_get_flat_size(eoh);
240 EOH_flatten_into(eoh, data, data_length);
241 }
242 else
243 {
244 *infomask |= HEAP_HASEXTERNAL;
245 /* no alignment, since it's short by definition */
246 data_length = VARSIZE_EXTERNAL(val);
247 memcpy(data, val, data_length);
248 }
249 }
250 else if (VARATT_IS_SHORT(val))
251 {
252 /* no alignment for short varlenas */
253 data_length = VARSIZE_SHORT(val);
254 memcpy(data, val, data_length);
255 }
256 else if (VARLENA_ATT_IS_PACKABLE(att) &&
257 VARATT_CAN_MAKE_SHORT(val))
258 {
259 /* convert to short varlena -- no alignment */
260 data_length = VARATT_CONVERTED_SHORT_SIZE(val);
261 SET_VARSIZE_SHORT(data, data_length);
262 memcpy(data + 1, VARDATA(val), data_length - 1);
263 }
264 else
265 {
266 /* full 4-byte header varlena */
267 data = (char *) att_align_nominal(data,
268 att->attalign);
269 data_length = VARSIZE(val);
270 memcpy(data, val, data_length);
271 }
272 }
273 else if (att->attlen == -2)
274 {
275 /* cstring ... never needs alignment */
276 *infomask |= HEAP_HASVARWIDTH;
277 Assert(att->attalign == 'c');
278 data_length = strlen(DatumGetCString(datum)) + 1;
279 memcpy(data, DatumGetPointer(datum), data_length);
280 }
281 else
282 {
283 /* fixed-length pass-by-reference */
284 data = (char *) att_align_nominal(data, att->attalign);
285 Assert(att->attlen > 0);
286 data_length = att->attlen;
287 memcpy(data, DatumGetPointer(datum), data_length);
288 }
289
290 data += data_length;
291 *dataP = data;
292}
293
294/*
295 * heap_fill_tuple
296 * Load data portion of a tuple from values/isnull arrays
297 *
298 * We also fill the null bitmap (if any) and set the infomask bits
299 * that reflect the tuple's data contents.
300 *
301 * NOTE: it is now REQUIRED that the caller have pre-zeroed the data area.
302 */
303void
304heap_fill_tuple(TupleDesc tupleDesc,
305 Datum *values, bool *isnull,
306 char *data, Size data_size,
307 uint16 *infomask, bits8 *bit)
308{
309 bits8 *bitP;
310 int bitmask;
311 int i;
312 int numberOfAttributes = tupleDesc->natts;
313
314#ifdef USE_ASSERT_CHECKING
315 char *start = data;
316#endif
317
318 if (bit != NULL)
319 {
320 bitP = &bit[-1];
321 bitmask = HIGHBIT;
322 }
323 else
324 {
325 /* just to keep compiler quiet */
326 bitP = NULL;
327 bitmask = 0;
328 }
329
330 *infomask &= ~(HEAP_HASNULL | HEAP_HASVARWIDTH | HEAP_HASEXTERNAL);
331
332 for (i = 0; i < numberOfAttributes; i++)
333 {
334 Form_pg_attribute attr = TupleDescAttr(tupleDesc, i);
335
336 fill_val(attr,
337 bitP ? &bitP : NULL,
338 &bitmask,
339 &data,
340 infomask,
341 values ? values[i] : PointerGetDatum(NULL),
342 isnull ? isnull[i] : true);
343 }
344
345 Assert((data - start) == data_size);
346}
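
/*
 * A minimal sketch of the expected calling pattern (mirroring
 * heap_form_tuple below); note the palloc0, which satisfies the
 * pre-zeroing requirement stated above:
 *
 *		data_len = heap_compute_data_size(tupleDesc, values, isnull);
 *		td = (HeapTupleHeader) palloc0(hoff + data_len);
 *		... set up the header fields, including td->t_hoff = hoff ...
 *		heap_fill_tuple(tupleDesc, values, isnull,
 *						(char *) td + hoff, data_len,
 *						&td->t_infomask,
 *						(hasnull ? td->t_bits : NULL));
 */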
347
348
349/* ----------------------------------------------------------------
350 * heap tuple interface
351 * ----------------------------------------------------------------
352 */
353
354/* ----------------
355 * heap_attisnull - returns true iff tuple attribute is not present
356 * ----------------
357 */
358bool
359heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc)
360{
361 /*
362 * We allow a NULL tupledesc for relations not expected to have missing
363 * values, such as catalog relations and indexes.
364 */
365 Assert(!tupleDesc || attnum <= tupleDesc->natts);
366 if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data))
367 {
368 if (tupleDesc && TupleDescAttr(tupleDesc, attnum - 1)->atthasmissing)
369 return false;
370 else
371 return true;
372 }
373
374 if (attnum > 0)
375 {
376 if (HeapTupleNoNulls(tup))
377 return false;
378 return att_isnull(attnum - 1, tup->t_data->t_bits);
379 }
380
381 switch (attnum)
382 {
383 case TableOidAttributeNumber:
384 case SelfItemPointerAttributeNumber:
385 case MinTransactionIdAttributeNumber:
386 case MinCommandIdAttributeNumber:
387 case MaxTransactionIdAttributeNumber:
388 case MaxCommandIdAttributeNumber:
389 /* these are never null */
390 break;
391
392 default:
393 elog(ERROR, "invalid attnum: %d", attnum);
394 }
395
396 return false;
397}
398
399/* ----------------
400 * nocachegetattr
401 *
402 * This only gets called from fastgetattr() macro, in cases where
403 * we can't use a cacheoffset and the value is not null.
404 *
405 * This caches attribute offsets in the attribute descriptor.
406 *
407 * An alternative way to speed things up would be to cache offsets
408 * with the tuple, but that seems more difficult unless you take
409 * the storage hit of actually putting those offsets into the
410 * tuple you send to disk. Yuck.
411 *
412 * This scheme will be slightly slower than that, but should
413 * perform well for queries which hit large #'s of tuples. After
414 * you cache the offsets once, examining all the other tuples using
415 * the same attribute descriptor will go much quicker. -cim 5/4/91
416 *
417 * NOTE: if you need to change this code, see also heap_deform_tuple.
418 * Also see nocache_index_getattr, which is the same code for index
419 * tuples.
420 * ----------------
421 */
422Datum
423nocachegetattr(HeapTuple tuple,
424 int attnum,
425 TupleDesc tupleDesc)
426{
427 HeapTupleHeader tup = tuple->t_data;
428 char *tp; /* ptr to data part of tuple */
429 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
430 bool slow = false; /* do we have to walk attrs? */
431 int off; /* current offset within data */
432
433 /* ----------------
434 * Three cases:
435 *
436 * 1: No nulls and no variable-width attributes.
437 * 2: Has a null or a var-width AFTER att.
438 * 3: Has nulls or var-widths BEFORE att.
439 * ----------------
440 */
441
442 attnum--;
443
444 if (!HeapTupleNoNulls(tuple))
445 {
446 /*
447 * there's a null somewhere in the tuple
448 *
449 * check to see if any preceding bits are null...
450 */
451 int byte = attnum >> 3;
452 int finalbit = attnum & 0x07;
453
454 /* check for nulls "before" final bit of last byte */
455 if ((~bp[byte]) & ((1 << finalbit) - 1))
456 slow = true;
457 else
458 {
459 /* check for nulls in any "earlier" bytes */
460 int i;
461
462 for (i = 0; i < byte; i++)
463 {
464 if (bp[i] != 0xFF)
465 {
466 slow = true;
467 break;
468 }
469 }
470 }
471 }
472
473 tp = (char *) tup + tup->t_hoff;
474
475 if (!slow)
476 {
477 Form_pg_attribute att;
478
479 /*
480 * If we get here, there are no nulls up to and including the target
481 * attribute. If we have a cached offset, we can use it.
482 */
483 att = TupleDescAttr(tupleDesc, attnum);
484 if (att->attcacheoff >= 0)
485 return fetchatt(att, tp + att->attcacheoff);
486
487 /*
488 * Otherwise, check for non-fixed-length attrs up to and including
489 * target. If there aren't any, it's safe to cheaply initialize the
490 * cached offsets for these attrs.
491 */
492 if (HeapTupleHasVarWidth(tuple))
493 {
494 int j;
495
496 for (j = 0; j <= attnum; j++)
497 {
498 if (TupleDescAttr(tupleDesc, j)->attlen <= 0)
499 {
500 slow = true;
501 break;
502 }
503 }
504 }
505 }
506
507 if (!slow)
508 {
509 int natts = tupleDesc->natts;
510 int j = 1;
511
512 /*
513 * If we get here, we have a tuple with no nulls or var-widths up to
514 * and including the target attribute, so we can use the cached offset
515 * ... only we don't have it yet, or we'd not have got here. Since
516 * it's cheap to compute offsets for fixed-width columns, we take the
517 * opportunity to initialize the cached offsets for *all* the leading
518 * fixed-width columns, in hope of avoiding future visits to this
519 * routine.
520 */
521 TupleDescAttr(tupleDesc, 0)->attcacheoff = 0;
522
523 /* we might have set some offsets in the slow path previously */
524 while (j < natts && TupleDescAttr(tupleDesc, j)->attcacheoff > 0)
525 j++;
526
527 off = TupleDescAttr(tupleDesc, j - 1)->attcacheoff +
528 TupleDescAttr(tupleDesc, j - 1)->attlen;
529
530 for (; j < natts; j++)
531 {
532 Form_pg_attribute att = TupleDescAttr(tupleDesc, j);
533
534 if (att->attlen <= 0)
535 break;
536
537 off = att_align_nominal(off, att->attalign);
538
539 att->attcacheoff = off;
540
541 off += att->attlen;
542 }
543
544 Assert(j > attnum);
545
546 off = TupleDescAttr(tupleDesc, attnum)->attcacheoff;
547 }
548 else
549 {
550 bool usecache = true;
551 int i;
552
553 /*
554 * Now we know that we have to walk the tuple CAREFULLY. But we still
555 * might be able to cache some offsets for next time.
556 *
557 * Note - This loop is a little tricky. For each non-null attribute,
558 * we have to first account for alignment padding before the attr,
559 * then advance over the attr based on its length. Nulls have no
560 * storage and no alignment padding either. We can use/set
561 * attcacheoff until we reach either a null or a var-width attribute.
562 */
563 off = 0;
564 for (i = 0;; i++) /* loop exit is at "break" */
565 {
566 Form_pg_attribute att = TupleDescAttr(tupleDesc, i);
567
568 if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
569 {
570 usecache = false;
571 continue; /* this cannot be the target att */
572 }
573
574 /* If we know the next offset, we can skip the rest */
575 if (usecache && att->attcacheoff >= 0)
576 off = att->attcacheoff;
577 else if (att->attlen == -1)
578 {
579 /*
580 * We can only cache the offset for a varlena attribute if the
581 * offset is already suitably aligned, so that there would be
582 * no pad bytes in any case: then the offset will be valid for
583 * either an aligned or unaligned value.
584 */
585 if (usecache &&
586 off == att_align_nominal(off, att->attalign))
587 att->attcacheoff = off;
588 else
589 {
590 off = att_align_pointer(off, att->attalign, -1,
591 tp + off);
592 usecache = false;
593 }
594 }
595 else
596 {
597 /* not varlena, so safe to use att_align_nominal */
598 off = att_align_nominal(off, att->attalign);
599
600 if (usecache)
601 att->attcacheoff = off;
602 }
603
604 if (i == attnum)
605 break;
606
607 off = att_addlength_pointer(off, att->attlen, tp + off);
608
609 if (usecache && att->attlen <= 0)
610 usecache = false;
611 }
612 }
613
614 return fetchatt(TupleDescAttr(tupleDesc, attnum), tp + off);
615}
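
/*
 * Callers normally reach this code via the heap_getattr() / fastgetattr()
 * macros rather than directly, e.g. (illustrative sketch):
 *
 *		bool		isnull;
 *		Datum		d = heap_getattr(tuple, attnum, tupleDesc, &isnull);
 *
 * which falls through to nocachegetattr() only when no cached offset is
 * usable and the value is known not to be null.
 */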
616
617/* ----------------
618 * heap_getsysattr
619 *
620 * Fetch the value of a system attribute for a tuple.
621 *
622 * This is a support routine for the heap_getattr macro. The macro
623 * has already determined that the attnum refers to a system attribute.
624 * ----------------
625 */
626Datum
627heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
628{
629 Datum result;
630
631 Assert(tup);
632
633 /* Currently, no sys attribute ever reads as NULL. */
634 *isnull = false;
635
636 switch (attnum)
637 {
638 case SelfItemPointerAttributeNumber:
639 /* pass-by-reference datatype */
640 result = PointerGetDatum(&(tup->t_self));
641 break;
642 case MinTransactionIdAttributeNumber:
643 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmin(tup->t_data));
644 break;
645 case MaxTransactionIdAttributeNumber:
646 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmax(tup->t_data));
647 break;
648 case MinCommandIdAttributeNumber:
649 case MaxCommandIdAttributeNumber:
650
651 /*
652 * cmin and cmax are now both aliases for the same field, which
653 * can in fact also be a combo command id. XXX perhaps we should
654 * return the "real" cmin or cmax if possible, that is if we are
655 * inside the originating transaction?
656 */
657 result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
658 break;
659 case TableOidAttributeNumber:
660 result = ObjectIdGetDatum(tup->t_tableOid);
661 break;
662 default:
663 elog(ERROR, "invalid attnum: %d", attnum);
664 result = 0; /* keep compiler quiet */
665 break;
666 }
667 return result;
668}
669
670/* ----------------
671 * heap_copytuple
672 *
673 * returns a copy of an entire tuple
674 *
675 * The HeapTuple struct, tuple header, and tuple data are all allocated
676 * as a single palloc() block.
677 * ----------------
678 */
679HeapTuple
680heap_copytuple(HeapTuple tuple)
681{
682 HeapTuple newTuple;
683
684 if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
685 return NULL;
686
687 newTuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple->t_len);
688 newTuple->t_len = tuple->t_len;
689 newTuple->t_self = tuple->t_self;
690 newTuple->t_tableOid = tuple->t_tableOid;
691 newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
692 memcpy((char *) newTuple->t_data, (char *) tuple->t_data, tuple->t_len);
693 return newTuple;
694}
695
696/* ----------------
697 * heap_copytuple_with_tuple
698 *
699 * copy a tuple into a caller-supplied HeapTuple management struct
700 *
701 * Note that after calling this function, the "dest" HeapTuple will not be
702 * allocated as a single palloc() block (unlike with heap_copytuple()).
703 * ----------------
704 */
705void
706heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
707{
708 if (!HeapTupleIsValid(src) || src->t_data == NULL)
709 {
710 dest->t_data = NULL;
711 return;
712 }
713
714 dest->t_len = src->t_len;
715 dest->t_self = src->t_self;
716 dest->t_tableOid = src->t_tableOid;
717 dest->t_data = (HeapTupleHeader) palloc(src->t_len);
718 memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len);
719}
720
721/*
 * Expand a tuple which has fewer attributes than required.  For each
 * attribute not present in the sourceTuple, the missing value will be used
 * if there is one; otherwise the attribute will be set to NULL.
 *
 * The source tuple must have fewer attributes than the required number.
727 *
728 * Only one of targetHeapTuple and targetMinimalTuple may be supplied. The
729 * other argument must be NULL.
730 */
731static void
732expand_tuple(HeapTuple *targetHeapTuple,
733 MinimalTuple *targetMinimalTuple,
734 HeapTuple sourceTuple,
735 TupleDesc tupleDesc)
736{
737 AttrMissing *attrmiss = NULL;
738 int attnum;
739 int firstmissingnum = 0;
740 bool hasNulls = HeapTupleHasNulls(sourceTuple);
741 HeapTupleHeader targetTHeader;
742 HeapTupleHeader sourceTHeader = sourceTuple->t_data;
743 int sourceNatts = HeapTupleHeaderGetNatts(sourceTHeader);
744 int natts = tupleDesc->natts;
745 int sourceNullLen;
746 int targetNullLen;
747 Size sourceDataLen = sourceTuple->t_len - sourceTHeader->t_hoff;
748 Size targetDataLen;
749 Size len;
750 int hoff;
751 bits8 *nullBits = NULL;
752 int bitMask = 0;
753 char *targetData;
754 uint16 *infoMask;
755
756 Assert((targetHeapTuple && !targetMinimalTuple)
757 || (!targetHeapTuple && targetMinimalTuple));
758
759 Assert(sourceNatts < natts);
760
761 sourceNullLen = (hasNulls ? BITMAPLEN(sourceNatts) : 0);
762
763 targetDataLen = sourceDataLen;
764
765 if (tupleDesc->constr &&
766 tupleDesc->constr->missing)
767 {
768 /*
769 * If there are missing values we want to put them into the tuple.
770 * Before that we have to compute the extra length for the values
771 * array and the variable length data.
772 */
773 attrmiss = tupleDesc->constr->missing;
774
775 /*
776 * Find the first item in attrmiss for which we don't have a value in
777 * the source. We can ignore all the missing entries before that.
778 */
779 for (firstmissingnum = sourceNatts;
780 firstmissingnum < natts;
781 firstmissingnum++)
782 {
783 if (attrmiss[firstmissingnum].am_present)
784 break;
785 else
786 hasNulls = true;
787 }
788
789 /*
790 * Now walk the missing attributes. If there is a missing value make
791 * space for it. Otherwise, it's going to be NULL.
792 */
793 for (attnum = firstmissingnum;
794 attnum < natts;
795 attnum++)
796 {
797 if (attrmiss[attnum].am_present)
798 {
799 Form_pg_attribute att = TupleDescAttr(tupleDesc, attnum);
800
801 targetDataLen = att_align_datum(targetDataLen,
802 att->attalign,
803 att->attlen,
804 attrmiss[attnum].am_value);
805
806 targetDataLen = att_addlength_pointer(targetDataLen,
807 att->attlen,
808 attrmiss[attnum].am_value);
809 }
810 else
811 {
812 /* no missing value, so it must be null */
813 hasNulls = true;
814 }
815 }
816 } /* end if have missing values */
817 else
818 {
819 /*
820 * If there are no missing values at all then NULLS must be allowed,
821 * since some of the attributes are known to be absent.
822 */
823 hasNulls = true;
824 }
825
826 len = 0;
827
828 if (hasNulls)
829 {
830 targetNullLen = BITMAPLEN(natts);
831 len += targetNullLen;
832 }
833 else
834 targetNullLen = 0;
835
836 /*
837 * Allocate and zero the space needed. Note that the tuple body and
838 * HeapTupleData management structure are allocated in one chunk.
839 */
840 if (targetHeapTuple)
841 {
842 len += offsetof(HeapTupleHeaderData, t_bits);
843 hoff = len = MAXALIGN(len); /* align user data safely */
844 len += targetDataLen;
845
846 *targetHeapTuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
847 (*targetHeapTuple)->t_data
848 = targetTHeader
849 = (HeapTupleHeader) ((char *) *targetHeapTuple + HEAPTUPLESIZE);
850 (*targetHeapTuple)->t_len = len;
851 (*targetHeapTuple)->t_tableOid = sourceTuple->t_tableOid;
852 (*targetHeapTuple)->t_self = sourceTuple->t_self;
853
854 targetTHeader->t_infomask = sourceTHeader->t_infomask;
855 targetTHeader->t_hoff = hoff;
856 HeapTupleHeaderSetNatts(targetTHeader, natts);
857 HeapTupleHeaderSetDatumLength(targetTHeader, len);
858 HeapTupleHeaderSetTypeId(targetTHeader, tupleDesc->tdtypeid);
859 HeapTupleHeaderSetTypMod(targetTHeader, tupleDesc->tdtypmod);
860 /* We also make sure that t_ctid is invalid unless explicitly set */
861 ItemPointerSetInvalid(&(targetTHeader->t_ctid));
862 if (targetNullLen > 0)
863 nullBits = (bits8 *) ((char *) (*targetHeapTuple)->t_data
864 + offsetof(HeapTupleHeaderData, t_bits));
865 targetData = (char *) (*targetHeapTuple)->t_data + hoff;
866 infoMask = &(targetTHeader->t_infomask);
867 }
868 else
869 {
870 len += SizeofMinimalTupleHeader;
871 hoff = len = MAXALIGN(len); /* align user data safely */
872 len += targetDataLen;
873
874 *targetMinimalTuple = (MinimalTuple) palloc0(len);
875 (*targetMinimalTuple)->t_len = len;
876 (*targetMinimalTuple)->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
877 (*targetMinimalTuple)->t_infomask = sourceTHeader->t_infomask;
878 /* Same macro works for MinimalTuples */
879 HeapTupleHeaderSetNatts(*targetMinimalTuple, natts);
880 if (targetNullLen > 0)
881 nullBits = (bits8 *) ((char *) *targetMinimalTuple
882 + offsetof(MinimalTupleData, t_bits));
883 targetData = (char *) *targetMinimalTuple + hoff;
884 infoMask = &((*targetMinimalTuple)->t_infomask);
885 }
886
887 if (targetNullLen > 0)
888 {
889 if (sourceNullLen > 0)
890 {
			/* if the bitmap pre-existed, copy it in - its bits are already correct */
892 memcpy(nullBits,
893 ((char *) sourceTHeader)
894 + offsetof(HeapTupleHeaderData, t_bits),
895 sourceNullLen);
896 nullBits += sourceNullLen - 1;
897 }
898 else
899 {
900 sourceNullLen = BITMAPLEN(sourceNatts);
901 /* Set NOT NULL for all existing attributes */
902 memset(nullBits, 0xff, sourceNullLen);
903
904 nullBits += sourceNullLen - 1;
905
906 if (sourceNatts & 0x07)
907 {
908 /* build the mask (inverted!) */
909 bitMask = 0xff << (sourceNatts & 0x07);
910 /* Voila */
911 *nullBits = ~bitMask;
912 }
913 }
914
915 bitMask = (1 << ((sourceNatts - 1) & 0x07));
916 } /* End if have null bitmap */
917
918 memcpy(targetData,
919 ((char *) sourceTuple->t_data) + sourceTHeader->t_hoff,
920 sourceDataLen);
921
922 targetData += sourceDataLen;
923
924 /* Now fill in the missing values */
925 for (attnum = sourceNatts; attnum < natts; attnum++)
926 {
927
928 Form_pg_attribute attr = TupleDescAttr(tupleDesc, attnum);
929
930 if (attrmiss && attrmiss[attnum].am_present)
931 {
932 fill_val(attr,
933 nullBits ? &nullBits : NULL,
934 &bitMask,
935 &targetData,
936 infoMask,
937 attrmiss[attnum].am_value,
938 false);
939 }
940 else
941 {
942 fill_val(attr,
943 &nullBits,
944 &bitMask,
945 &targetData,
946 infoMask,
947 (Datum) 0,
948 true);
949 }
950 } /* end loop over missing attributes */
951}
952
953/*
954 * Fill in the missing values for a minimal HeapTuple
955 */
956MinimalTuple
957minimal_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc)
958{
959 MinimalTuple minimalTuple;
960
961 expand_tuple(NULL, &minimalTuple, sourceTuple, tupleDesc);
962 return minimalTuple;
963}
964
965/*
966 * Fill in the missing values for an ordinary HeapTuple
967 */
968HeapTuple
969heap_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc)
970{
971 HeapTuple heapTuple;
972
973 expand_tuple(&heapTuple, NULL, sourceTuple, tupleDesc);
974 return heapTuple;
975}
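
/*
 * Illustrative sketch: a caller holding a tuple that predates an
 * ALTER TABLE ... ADD COLUMN (and so stores fewer attributes than the
 * current descriptor) can materialize the full row like this:
 *
 *		if (HeapTupleHeaderGetNatts(tuple->t_data) < tupleDesc->natts)
 *			tuple = heap_expand_tuple(tuple, tupleDesc);
 */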
976
977/* ----------------
978 * heap_copy_tuple_as_datum
979 *
980 * copy a tuple as a composite-type Datum
981 * ----------------
982 */
983Datum
984heap_copy_tuple_as_datum(HeapTuple tuple, TupleDesc tupleDesc)
985{
986 HeapTupleHeader td;
987
988 /*
989 * If the tuple contains any external TOAST pointers, we have to inline
990 * those fields to meet the conventions for composite-type Datums.
991 */
992 if (HeapTupleHasExternal(tuple))
993 return toast_flatten_tuple_to_datum(tuple->t_data,
994 tuple->t_len,
995 tupleDesc);
996
997 /*
998 * Fast path for easy case: just make a palloc'd copy and insert the
999 * correct composite-Datum header fields (since those may not be set if
1000 * the given tuple came from disk, rather than from heap_form_tuple).
1001 */
1002 td = (HeapTupleHeader) palloc(tuple->t_len);
1003 memcpy((char *) td, (char *) tuple->t_data, tuple->t_len);
1004
1005 HeapTupleHeaderSetDatumLength(td, tuple->t_len);
1006 HeapTupleHeaderSetTypeId(td, tupleDesc->tdtypeid);
1007 HeapTupleHeaderSetTypMod(td, tupleDesc->tdtypmod);
1008
1009 return PointerGetDatum(td);
1010}
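
/*
 * Illustrative sketch: turning a tuple (for instance one fetched from a
 * table) into a composite Datum that is safe to hand back to the caller:
 *
 *		Datum		d = heap_copy_tuple_as_datum(tuple, tupleDesc);
 *
 * Unlike HeapTupleGetDatum(), this always produces a palloc'd copy with
 * any external TOAST pointers flattened.
 */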
1011
1012/*
1013 * heap_form_tuple
1014 * construct a tuple from the given values[] and isnull[] arrays,
1015 * which are of the length indicated by tupleDescriptor->natts
1016 *
1017 * The result is allocated in the current memory context.
1018 */
1019HeapTuple
1020heap_form_tuple(TupleDesc tupleDescriptor,
1021 Datum *values,
1022 bool *isnull)
1023{
1024 HeapTuple tuple; /* return tuple */
1025 HeapTupleHeader td; /* tuple data */
1026 Size len,
1027 data_len;
1028 int hoff;
1029 bool hasnull = false;
1030 int numberOfAttributes = tupleDescriptor->natts;
1031 int i;
1032
1033 if (numberOfAttributes > MaxTupleAttributeNumber)
1034 ereport(ERROR,
1035 (errcode(ERRCODE_TOO_MANY_COLUMNS),
1036 errmsg("number of columns (%d) exceeds limit (%d)",
1037 numberOfAttributes, MaxTupleAttributeNumber)));
1038
1039 /*
1040 * Check for nulls
1041 */
1042 for (i = 0; i < numberOfAttributes; i++)
1043 {
1044 if (isnull[i])
1045 {
1046 hasnull = true;
1047 break;
1048 }
1049 }
1050
1051 /*
1052 * Determine total space needed
1053 */
1054 len = offsetof(HeapTupleHeaderData, t_bits);
1055
1056 if (hasnull)
1057 len += BITMAPLEN(numberOfAttributes);
1058
1059 hoff = len = MAXALIGN(len); /* align user data safely */
1060
1061 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
1062
1063 len += data_len;
1064
1065 /*
1066 * Allocate and zero the space needed. Note that the tuple body and
1067 * HeapTupleData management structure are allocated in one chunk.
1068 */
1069 tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
1070 tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
1071
1072 /*
1073 * And fill in the information. Note we fill the Datum fields even though
1074 * this tuple may never become a Datum. This lets HeapTupleHeaderGetDatum
1075 * identify the tuple type if needed.
1076 */
1077 tuple->t_len = len;
1078 ItemPointerSetInvalid(&(tuple->t_self));
1079 tuple->t_tableOid = InvalidOid;
1080
1081 HeapTupleHeaderSetDatumLength(td, len);
1082 HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
1083 HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);
1084 /* We also make sure that t_ctid is invalid unless explicitly set */
1085 ItemPointerSetInvalid(&(td->t_ctid));
1086
1087 HeapTupleHeaderSetNatts(td, numberOfAttributes);
1088 td->t_hoff = hoff;
1089
1090 heap_fill_tuple(tupleDescriptor,
1091 values,
1092 isnull,
1093 (char *) td + hoff,
1094 data_len,
1095 &td->t_infomask,
1096 (hasnull ? td->t_bits : NULL));
1097
1098 return tuple;
1099}
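
/*
 * Typical usage (an illustrative sketch, assuming a two-column descriptor
 * with an int4 column followed by a text column):
 *
 *		Datum		values[2];
 *		bool		isnull[2];
 *		HeapTuple	tup;
 *
 *		values[0] = Int32GetDatum(42);
 *		isnull[0] = false;
 *		values[1] = CStringGetTextDatum("hello");
 *		isnull[1] = false;
 *		tup = heap_form_tuple(tupleDescriptor, values, isnull);
 */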
1100
1101/*
1102 * heap_modify_tuple
1103 * form a new tuple from an old tuple and a set of replacement values.
1104 *
1105 * The replValues, replIsnull, and doReplace arrays must be of the length
1106 * indicated by tupleDesc->natts. The new tuple is constructed using the data
1107 * from replValues/replIsnull at columns where doReplace is true, and using
1108 * the data from the old tuple at columns where doReplace is false.
1109 *
1110 * The result is allocated in the current memory context.
1111 */
1112HeapTuple
1113heap_modify_tuple(HeapTuple tuple,
1114 TupleDesc tupleDesc,
1115 Datum *replValues,
1116 bool *replIsnull,
1117 bool *doReplace)
1118{
1119 int numberOfAttributes = tupleDesc->natts;
1120 int attoff;
1121 Datum *values;
1122 bool *isnull;
1123 HeapTuple newTuple;
1124
1125 /*
1126 * allocate and fill values and isnull arrays from either the tuple or the
1127 * repl information, as appropriate.
1128 *
	 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
	 * heap_getattr() for only the non-replaced columns.  The latter could
	 * win if there are many replaced columns and few non-replaced ones.
	 * However, heap_deform_tuple costs only O(N) while the heap_getattr way
	 * would cost O(N^2) if there are many non-replaced columns, so it seems
	 * better to err on the side of linear cost.
1135 */
1136 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
1137 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
1138
1139 heap_deform_tuple(tuple, tupleDesc, values, isnull);
1140
1141 for (attoff = 0; attoff < numberOfAttributes; attoff++)
1142 {
1143 if (doReplace[attoff])
1144 {
1145 values[attoff] = replValues[attoff];
1146 isnull[attoff] = replIsnull[attoff];
1147 }
1148 }
1149
1150 /*
1151 * create a new tuple from the values and isnull arrays
1152 */
1153 newTuple = heap_form_tuple(tupleDesc, values, isnull);
1154
1155 pfree(values);
1156 pfree(isnull);
1157
1158 /*
1159 * copy the identification info of the old tuple: t_ctid, t_self
1160 */
1161 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
1162 newTuple->t_self = tuple->t_self;
1163 newTuple->t_tableOid = tuple->t_tableOid;
1164
1165 return newTuple;
1166}
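
/*
 * Illustrative sketch: replace only column 3 of an existing tuple, keeping
 * the rest (all three arrays are tupleDesc->natts entries long, and only
 * the entries where doReplace is true are consulted):
 *
 *		memset(repl_repl, false, sizeof(repl_repl));
 *		repl_repl[2] = true;			(column 3 is zero-based index 2)
 *		repl_val[2] = newValue;
 *		repl_null[2] = false;
 *		newtup = heap_modify_tuple(oldtup, tupleDesc,
 *								   repl_val, repl_null, repl_repl);
 *
 * where repl_val/repl_null/repl_repl and newValue stand in for the
 * caller's own arrays and datum.
 */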
1167
1168/*
1169 * heap_modify_tuple_by_cols
1170 * form a new tuple from an old tuple and a set of replacement values.
1171 *
1172 * This is like heap_modify_tuple, except that instead of specifying which
1173 * column(s) to replace by a boolean map, an array of target column numbers
1174 * is used. This is often more convenient when a fixed number of columns
1175 * are to be replaced. The replCols, replValues, and replIsnull arrays must
1176 * be of length nCols. Target column numbers are indexed from 1.
1177 *
1178 * The result is allocated in the current memory context.
1179 */
1180HeapTuple
1181heap_modify_tuple_by_cols(HeapTuple tuple,
1182 TupleDesc tupleDesc,
1183 int nCols,
1184 int *replCols,
1185 Datum *replValues,
1186 bool *replIsnull)
1187{
1188 int numberOfAttributes = tupleDesc->natts;
1189 Datum *values;
1190 bool *isnull;
1191 HeapTuple newTuple;
1192 int i;
1193
1194 /*
1195 * allocate and fill values and isnull arrays from the tuple, then replace
1196 * selected columns from the input arrays.
1197 */
1198 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
1199 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
1200
1201 heap_deform_tuple(tuple, tupleDesc, values, isnull);
1202
1203 for (i = 0; i < nCols; i++)
1204 {
1205 int attnum = replCols[i];
1206
1207 if (attnum <= 0 || attnum > numberOfAttributes)
1208 elog(ERROR, "invalid column number %d", attnum);
1209 values[attnum - 1] = replValues[i];
1210 isnull[attnum - 1] = replIsnull[i];
1211 }
1212
1213 /*
1214 * create a new tuple from the values and isnull arrays
1215 */
1216 newTuple = heap_form_tuple(tupleDesc, values, isnull);
1217
1218 pfree(values);
1219 pfree(isnull);
1220
1221 /*
1222 * copy the identification info of the old tuple: t_ctid, t_self
1223 */
1224 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
1225 newTuple->t_self = tuple->t_self;
1226 newTuple->t_tableOid = tuple->t_tableOid;
1227
1228 return newTuple;
1229}
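
/*
 * Illustrative sketch: the same single-column replacement as above, but
 * naming the target column (1-based) instead of passing a boolean map:
 *
 *		int			replCol = 3;
 *		Datum		replVal = newValue;	(caller-supplied datum)
 *		bool		replNull = false;
 *
 *		newtup = heap_modify_tuple_by_cols(oldtup, tupleDesc,
 *										   1, &replCol, &replVal, &replNull);
 */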
1230
1231/*
1232 * heap_deform_tuple
1233 * Given a tuple, extract data into values/isnull arrays; this is
1234 * the inverse of heap_form_tuple.
1235 *
1236 * Storage for the values/isnull arrays is provided by the caller;
1237 * it should be sized according to tupleDesc->natts not
1238 * HeapTupleHeaderGetNatts(tuple->t_data).
1239 *
1240 * Note that for pass-by-reference datatypes, the pointer placed
1241 * in the Datum will point into the given tuple.
1242 *
1243 * When all or most of a tuple's fields need to be extracted,
1244 * this routine will be significantly quicker than a loop around
1245 * heap_getattr; the loop will become O(N^2) as soon as any
1246 * noncacheable attribute offsets are involved.
1247 */
1248void
1249heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
1250 Datum *values, bool *isnull)
1251{
1252 HeapTupleHeader tup = tuple->t_data;
1253 bool hasnulls = HeapTupleHasNulls(tuple);
1254 int tdesc_natts = tupleDesc->natts;
1255 int natts; /* number of atts to extract */
1256 int attnum;
1257 char *tp; /* ptr to tuple data */
1258 uint32 off; /* offset in tuple data */
1259 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
1260 bool slow = false; /* can we use/set attcacheoff? */
1261
1262 natts = HeapTupleHeaderGetNatts(tup);
1263
1264 /*
1265 * In inheritance situations, it is possible that the given tuple actually
1266 * has more fields than the caller is expecting. Don't run off the end of
1267 * the caller's arrays.
1268 */
1269 natts = Min(natts, tdesc_natts);
1270
1271 tp = (char *) tup + tup->t_hoff;
1272
1273 off = 0;
1274
1275 for (attnum = 0; attnum < natts; attnum++)
1276 {
1277 Form_pg_attribute thisatt = TupleDescAttr(tupleDesc, attnum);
1278
1279 if (hasnulls && att_isnull(attnum, bp))
1280 {
1281 values[attnum] = (Datum) 0;
1282 isnull[attnum] = true;
1283 slow = true; /* can't use attcacheoff anymore */
1284 continue;
1285 }
1286
1287 isnull[attnum] = false;
1288
1289 if (!slow && thisatt->attcacheoff >= 0)
1290 off = thisatt->attcacheoff;
1291 else if (thisatt->attlen == -1)
1292 {
1293 /*
1294 * We can only cache the offset for a varlena attribute if the
1295 * offset is already suitably aligned, so that there would be no
1296 * pad bytes in any case: then the offset will be valid for either
1297 * an aligned or unaligned value.
1298 */
1299 if (!slow &&
1300 off == att_align_nominal(off, thisatt->attalign))
1301 thisatt->attcacheoff = off;
1302 else
1303 {
1304 off = att_align_pointer(off, thisatt->attalign, -1,
1305 tp + off);
1306 slow = true;
1307 }
1308 }
1309 else
1310 {
1311 /* not varlena, so safe to use att_align_nominal */
1312 off = att_align_nominal(off, thisatt->attalign);
1313
1314 if (!slow)
1315 thisatt->attcacheoff = off;
1316 }
1317
1318 values[attnum] = fetchatt(thisatt, tp + off);
1319
1320 off = att_addlength_pointer(off, thisatt->attlen, tp + off);
1321
1322 if (thisatt->attlen <= 0)
1323 slow = true; /* can't use attcacheoff anymore */
1324 }
1325
1326 /*
1327 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1328 * rest as nulls or missing values as appropriate.
1329 */
1330 for (; attnum < tdesc_natts; attnum++)
1331 values[attnum] = getmissingattr(tupleDesc, attnum + 1, &isnull[attnum]);
1332}
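
/*
 * Typical usage (illustrative sketch); note that the arrays are sized from
 * the descriptor, not from the tuple itself:
 *
 *		Datum	   *values = (Datum *) palloc(tupleDesc->natts * sizeof(Datum));
 *		bool	   *isnull = (bool *) palloc(tupleDesc->natts * sizeof(bool));
 *
 *		heap_deform_tuple(tuple, tupleDesc, values, isnull);
 */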
1333
1334/*
1335 * heap_freetuple
1336 */
1337void
1338heap_freetuple(HeapTuple htup)
1339{
1340 pfree(htup);
1341}
1342
1343
1344/*
1345 * heap_form_minimal_tuple
1346 * construct a MinimalTuple from the given values[] and isnull[] arrays,
1347 * which are of the length indicated by tupleDescriptor->natts
1348 *
1349 * This is exactly like heap_form_tuple() except that the result is a
1350 * "minimal" tuple lacking a HeapTupleData header as well as room for system
1351 * columns.
1352 *
1353 * The result is allocated in the current memory context.
1354 */
1355MinimalTuple
1356heap_form_minimal_tuple(TupleDesc tupleDescriptor,
1357 Datum *values,
1358 bool *isnull)
1359{
1360 MinimalTuple tuple; /* return tuple */
1361 Size len,
1362 data_len;
1363 int hoff;
1364 bool hasnull = false;
1365 int numberOfAttributes = tupleDescriptor->natts;
1366 int i;
1367
1368 if (numberOfAttributes > MaxTupleAttributeNumber)
1369 ereport(ERROR,
1370 (errcode(ERRCODE_TOO_MANY_COLUMNS),
1371 errmsg("number of columns (%d) exceeds limit (%d)",
1372 numberOfAttributes, MaxTupleAttributeNumber)));
1373
1374 /*
1375 * Check for nulls
1376 */
1377 for (i = 0; i < numberOfAttributes; i++)
1378 {
1379 if (isnull[i])
1380 {
1381 hasnull = true;
1382 break;
1383 }
1384 }
1385
1386 /*
1387 * Determine total space needed
1388 */
1389 len = SizeofMinimalTupleHeader;
1390
1391 if (hasnull)
1392 len += BITMAPLEN(numberOfAttributes);
1393
1394 hoff = len = MAXALIGN(len); /* align user data safely */
1395
1396 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
1397
1398 len += data_len;
1399
1400 /*
1401 * Allocate and zero the space needed.
1402 */
1403 tuple = (MinimalTuple) palloc0(len);
1404
1405 /*
1406 * And fill in the information.
1407 */
1408 tuple->t_len = len;
1409 HeapTupleHeaderSetNatts(tuple, numberOfAttributes);
1410 tuple->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
1411
1412 heap_fill_tuple(tupleDescriptor,
1413 values,
1414 isnull,
1415 (char *) tuple + hoff,
1416 data_len,
1417 &tuple->t_infomask,
1418 (hasnull ? tuple->t_bits : NULL));
1419
1420 return tuple;
1421}
1422
1423/*
1424 * heap_free_minimal_tuple
1425 */
1426void
1427heap_free_minimal_tuple(MinimalTuple mtup)
1428{
1429 pfree(mtup);
1430}
1431
1432/*
1433 * heap_copy_minimal_tuple
1434 * copy a MinimalTuple
1435 *
1436 * The result is allocated in the current memory context.
1437 */
1438MinimalTuple
1439heap_copy_minimal_tuple(MinimalTuple mtup)
1440{
1441 MinimalTuple result;
1442
1443 result = (MinimalTuple) palloc(mtup->t_len);
1444 memcpy(result, mtup, mtup->t_len);
1445 return result;
1446}
1447
1448/*
1449 * heap_tuple_from_minimal_tuple
1450 * create a HeapTuple by copying from a MinimalTuple;
1451 * system columns are filled with zeroes
1452 *
1453 * The result is allocated in the current memory context.
1454 * The HeapTuple struct, tuple header, and tuple data are all allocated
1455 * as a single palloc() block.
1456 */
1457HeapTuple
1458heap_tuple_from_minimal_tuple(MinimalTuple mtup)
1459{
1460 HeapTuple result;
1461 uint32 len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
1462
1463 result = (HeapTuple) palloc(HEAPTUPLESIZE + len);
1464 result->t_len = len;
1465 ItemPointerSetInvalid(&(result->t_self));
1466 result->t_tableOid = InvalidOid;
1467 result->t_data = (HeapTupleHeader) ((char *) result + HEAPTUPLESIZE);
1468 memcpy((char *) result->t_data + MINIMAL_TUPLE_OFFSET, mtup, mtup->t_len);
1469 memset(result->t_data, 0, offsetof(HeapTupleHeaderData, t_infomask2));
1470 return result;
1471}
1472
1473/*
1474 * minimal_tuple_from_heap_tuple
1475 * create a MinimalTuple by copying from a HeapTuple
1476 *
1477 * The result is allocated in the current memory context.
1478 */
1479MinimalTuple
1480minimal_tuple_from_heap_tuple(HeapTuple htup)
1481{
1482 MinimalTuple result;
1483 uint32 len;
1484
1485 Assert(htup->t_len > MINIMAL_TUPLE_OFFSET);
1486 len = htup->t_len - MINIMAL_TUPLE_OFFSET;
1487 result = (MinimalTuple) palloc(len);
1488 memcpy(result, (char *) htup->t_data + MINIMAL_TUPLE_OFFSET, len);
1489 result->t_len = len;
1490 return result;
1491}
1492
1493/*
1494 * This mainly exists so JIT can inline the definition, but it's also
1495 * sometimes useful in debugging sessions.
1496 */
1497size_t
1498varsize_any(void *p)
1499{
1500 return VARSIZE_ANY(p);
1501}
1502