1 | /*------------------------------------------------------------------------- |
2 | * |
3 | * typcache.c |
4 | * POSTGRES type cache code |
5 | * |
6 | * The type cache exists to speed lookup of certain information about data |
7 | * types that is not directly available from a type's pg_type row. For |
8 | * example, we use a type's default btree opclass, or the default hash |
9 | * opclass if no btree opclass exists, to determine which operators should |
10 | * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC). |
11 | * |
12 | * Several seemingly-odd choices have been made to support use of the type |
13 | * cache by generic array and record handling routines, such as array_eq(), |
14 | * record_cmp(), and hash_array(). Because those routines are used as index |
15 | * support operations, they cannot leak memory. To allow them to execute |
16 | * efficiently, all information that they would like to re-use across calls |
17 | * is kept in the type cache. |
18 | * |
19 | * Once created, a type cache entry lives as long as the backend does, so |
20 | * there is no need for a call to release a cache entry. If the type is |
21 | * dropped, the cache entry simply becomes wasted storage. This is not |
22 | * expected to happen often, and assuming that typcache entries are good |
23 | * permanently allows caching pointers to them in long-lived places. |
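 *
 * For example, a function that is called repeatedly for the same datatype
 * typically caches the entry pointer in its flinfo->fn_extra, along the
 * lines of this illustrative sketch (the variable names here are only for
 * illustration, not taken from any particular caller):
 *
 *		typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
 *		if (typentry == NULL ||
 *			typentry->type_id != element_type)
 *		{
 *			typentry = lookup_type_cache(element_type,
 *										 TYPECACHE_EQ_OPR_FINFO);
 *			fcinfo->flinfo->fn_extra = (void *) typentry;
 *		}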
24 | * |
25 | * We have some provisions for updating cache entries if the stored data |
26 | * becomes obsolete. Information dependent on opclasses is cleared if we |
27 | * detect updates to pg_opclass. We also support clearing the tuple |
28 | * descriptor and operator/function parts of a rowtype's cache entry, |
29 | * since those may need to change as a consequence of ALTER TABLE. |
30 | * Domain constraint changes are also tracked properly. |
31 | * |
32 | * |
33 | * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group |
34 | * Portions Copyright (c) 1994, Regents of the University of California |
35 | * |
36 | * IDENTIFICATION |
37 | * src/backend/utils/cache/typcache.c |
38 | * |
39 | *------------------------------------------------------------------------- |
40 | */ |
41 | #include "postgres.h" |
42 | |
43 | #include <limits.h> |
44 | |
45 | #include "access/hash.h" |
46 | #include "access/htup_details.h" |
47 | #include "access/nbtree.h" |
48 | #include "access/parallel.h" |
49 | #include "access/relation.h" |
50 | #include "access/session.h" |
51 | #include "access/table.h" |
52 | #include "catalog/indexing.h" |
53 | #include "catalog/pg_am.h" |
54 | #include "catalog/pg_constraint.h" |
55 | #include "catalog/pg_enum.h" |
56 | #include "catalog/pg_operator.h" |
57 | #include "catalog/pg_range.h" |
58 | #include "catalog/pg_type.h" |
59 | #include "commands/defrem.h" |
60 | #include "executor/executor.h" |
61 | #include "lib/dshash.h" |
62 | #include "optimizer/optimizer.h" |
63 | #include "storage/lwlock.h" |
64 | #include "utils/builtins.h" |
65 | #include "utils/catcache.h" |
66 | #include "utils/fmgroids.h" |
67 | #include "utils/inval.h" |
68 | #include "utils/lsyscache.h" |
69 | #include "utils/memutils.h" |
70 | #include "utils/rel.h" |
71 | #include "utils/snapmgr.h" |
72 | #include "utils/syscache.h" |
73 | #include "utils/typcache.h" |
74 | |
75 | |
76 | /* The main type cache hashtable searched by lookup_type_cache */ |
77 | static HTAB *TypeCacheHash = NULL; |
78 | |
79 | /* List of type cache entries for domain types */ |
80 | static TypeCacheEntry *firstDomainTypeEntry = NULL; |
81 | |
82 | /* Private flag bits in the TypeCacheEntry.flags field */ |
83 | #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000001 |
84 | #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000002 |
85 | #define TCFLAGS_CHECKED_EQ_OPR 0x000004 |
86 | #define TCFLAGS_CHECKED_LT_OPR 0x000008 |
87 | #define TCFLAGS_CHECKED_GT_OPR 0x000010 |
88 | #define TCFLAGS_CHECKED_CMP_PROC 0x000020 |
89 | #define TCFLAGS_CHECKED_HASH_PROC 0x000040 |
90 | #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000080 |
91 | #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000100 |
92 | #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000200 |
93 | #define TCFLAGS_HAVE_ELEM_COMPARE 0x000400 |
94 | #define TCFLAGS_HAVE_ELEM_HASHING 0x000800 |
95 | #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x001000 |
96 | #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x002000 |
97 | #define TCFLAGS_HAVE_FIELD_EQUALITY 0x004000 |
98 | #define TCFLAGS_HAVE_FIELD_COMPARE 0x008000 |
99 | #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x010000 |
100 | #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x020000 |
101 | |
102 | /* |
103 | * Data stored about a domain type's constraints. Note that we do not create |
104 | * this struct for the common case of a constraint-less domain; we just set |
105 | * domainData to NULL to indicate that. |
106 | * |
107 | * Within a DomainConstraintCache, we store expression plan trees, but the |
108 | * check_exprstate fields of the DomainConstraintState nodes are just NULL. |
109 | * When needed, expression evaluation nodes are built by flat-copying the |
110 | * DomainConstraintState nodes and applying ExecInitExpr to check_expr. |
111 | * Such a node tree is not part of the DomainConstraintCache, but is |
112 | * considered to belong to a DomainConstraintRef. |
113 | */ |
114 | struct DomainConstraintCache |
115 | { |
116 | List *constraints; /* list of DomainConstraintState nodes */ |
117 | MemoryContext dccContext; /* memory context holding all associated data */ |
118 | long dccRefCount; /* number of references to this struct */ |
119 | }; |
120 | |
121 | /* Private information to support comparisons of enum values */ |
122 | typedef struct |
123 | { |
124 | Oid enum_oid; /* OID of one enum value */ |
125 | float4 sort_order; /* its sort position */ |
126 | } EnumItem; |
127 | |
128 | typedef struct TypeCacheEnumData |
129 | { |
130 | Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */ |
131 | Bitmapset *sorted_values; /* Set of OIDs known to be in order */ |
132 | int num_values; /* total number of values in enum */ |
133 | EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]; |
134 | } TypeCacheEnumData; |
135 | |
136 | /* |
137 | * We use a separate table for storing the definitions of non-anonymous |
138 | * record types. Once defined, a record type will be remembered for the |
139 | * life of the backend. Subsequent uses of the "same" record type (where |
140 | * sameness means equalTupleDescs) will refer to the existing table entry. |
141 | * |
142 | * Stored record types are remembered in a linear array of TupleDescs, |
143 | * which can be indexed quickly with the assigned typmod. There is also |
144 | * a hash table to speed searches for matching TupleDescs. |
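 *
 * As a minimal sketch of the lookup side (assuming the typmod was assigned
 * earlier by assign_record_type_typmod), resolving a registered typmod is
 * just an array fetch:
 *
 *		if (typmod >= 0 && typmod < RecordCacheArrayLen)
 *			tupdesc = RecordCacheArray[typmod];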
145 | */ |
146 | |
147 | typedef struct RecordCacheEntry |
148 | { |
149 | TupleDesc tupdesc; |
150 | } RecordCacheEntry; |
151 | |
152 | /* |
153 | * To deal with non-anonymous record types that are exchanged by backends |
154 | * involved in a parallel query, we also need a shared version of the above. |
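 *
 * A rough usage sketch (these are the public entry points declared in
 * typcache.h; setup and error-handling details are omitted): the backend
 * that creates the per-session DSM segment initializes the registry with
 *
 *		SharedRecordTypmodRegistryInit(registry, segment, area);
 *
 * and each parallel worker that attaches to that segment then calls
 *
 *		SharedRecordTypmodRegistryAttach(registry);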
155 | */ |
156 | struct SharedRecordTypmodRegistry |
157 | { |
158 | /* A hash table for finding a matching TupleDesc. */ |
159 | dshash_table_handle record_table_handle; |
160 | /* A hash table for finding a TupleDesc by typmod. */ |
161 | dshash_table_handle typmod_table_handle; |
162 | /* A source of new record typmod numbers. */ |
163 | pg_atomic_uint32 next_typmod; |
164 | }; |
165 | |
166 | /* |
167 | * When using shared tuple descriptors as hash table keys we need a way to be |
168 | * able to search for an equal shared TupleDesc using a backend-local |
169 | * TupleDesc. So we use this type which can hold either, and hash and compare |
170 | * functions that know how to handle both. |
171 | */ |
172 | typedef struct SharedRecordTableKey |
173 | { |
174 | union |
175 | { |
176 | TupleDesc local_tupdesc; |
177 | dsa_pointer shared_tupdesc; |
178 | } u; |
179 | bool shared; |
180 | } SharedRecordTableKey; |
181 | |
182 | /* |
183 | * The shared version of RecordCacheEntry. This lets us look up a typmod |
184 | * using a TupleDesc which may be in local or shared memory. |
185 | */ |
186 | typedef struct SharedRecordTableEntry |
187 | { |
188 | SharedRecordTableKey key; |
189 | } SharedRecordTableEntry; |
190 | |
191 | /* |
192 | * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look |
193 | * up a TupleDesc in shared memory using a typmod. |
194 | */ |
195 | typedef struct SharedTypmodTableEntry |
196 | { |
197 | uint32 typmod; |
198 | dsa_pointer shared_tupdesc; |
199 | } SharedTypmodTableEntry; |
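
/*
 * Illustrative sketch of how the typmod table is consulted (this mirrors the
 * logic further down in lookup_rowtype_tupdesc_internal; the variable names
 * are only for illustration):
 *
 *		SharedTypmodTableEntry *entry;
 *
 *		entry = dshash_find(CurrentSession->shared_typmod_table,
 *							&typmod, false);
 *		if (entry != NULL)
 *			tupdesc = (TupleDesc)
 *				dsa_get_address(CurrentSession->area, entry->shared_tupdesc);
 */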
200 | |
201 | /* |
202 | * A comparator function for SharedRecordTableKey. |
203 | */ |
204 | static int |
205 | shared_record_table_compare(const void *a, const void *b, size_t size, |
206 | void *arg) |
207 | { |
208 | dsa_area *area = (dsa_area *) arg; |
209 | SharedRecordTableKey *k1 = (SharedRecordTableKey *) a; |
210 | SharedRecordTableKey *k2 = (SharedRecordTableKey *) b; |
211 | TupleDesc t1; |
212 | TupleDesc t2; |
213 | |
214 | if (k1->shared) |
215 | t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc); |
216 | else |
217 | t1 = k1->u.local_tupdesc; |
218 | |
219 | if (k2->shared) |
220 | t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc); |
221 | else |
222 | t2 = k2->u.local_tupdesc; |
223 | |
224 | return equalTupleDescs(t1, t2) ? 0 : 1; |
225 | } |
226 | |
227 | /* |
228 | * A hash function for SharedRecordTableKey. |
229 | */ |
230 | static uint32 |
231 | shared_record_table_hash(const void *a, size_t size, void *arg) |
232 | { |
233 | dsa_area *area = (dsa_area *) arg; |
234 | SharedRecordTableKey *k = (SharedRecordTableKey *) a; |
235 | TupleDesc t; |
236 | |
237 | if (k->shared) |
238 | t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc); |
239 | else |
240 | t = k->u.local_tupdesc; |
241 | |
242 | return hashTupleDesc(t); |
243 | } |
244 | |
245 | /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */ |
246 | static const dshash_parameters srtr_record_table_params = { |
247 | sizeof(SharedRecordTableKey), /* unused */ |
248 | sizeof(SharedRecordTableEntry), |
249 | shared_record_table_compare, |
250 | shared_record_table_hash, |
251 | LWTRANCHE_SESSION_RECORD_TABLE |
252 | }; |
253 | |
254 | /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */ |
255 | static const dshash_parameters srtr_typmod_table_params = { |
256 | sizeof(uint32), |
257 | sizeof(SharedTypmodTableEntry), |
258 | dshash_memcmp, |
259 | dshash_memhash, |
260 | LWTRANCHE_SESSION_TYPMOD_TABLE |
261 | }; |
262 | |
263 | /* hashtable for recognizing registered record types */ |
264 | static HTAB *RecordCacheHash = NULL; |
265 | |
266 | /* arrays of info about registered record types, indexed by assigned typmod */ |
267 | static TupleDesc *RecordCacheArray = NULL; |
268 | static uint64 *RecordIdentifierArray = NULL; |
269 | static int32 RecordCacheArrayLen = 0; /* allocated length of above arrays */ |
270 | static int32 NextRecordTypmod = 0; /* number of entries used */ |
271 | |
272 | /* |
273 | * Process-wide counter for generating unique tupledesc identifiers. |
274 | * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen |
275 | * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER. |
276 | */ |
277 | static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER; |
278 | |
279 | static void load_typcache_tupdesc(TypeCacheEntry *typentry); |
280 | static void load_rangetype_info(TypeCacheEntry *typentry); |
281 | static void load_domaintype_info(TypeCacheEntry *typentry); |
282 | static int dcs_cmp(const void *a, const void *b); |
283 | static void decr_dcc_refcount(DomainConstraintCache *dcc); |
284 | static void dccref_deletion_callback(void *arg); |
285 | static List *prep_domain_constraints(List *constraints, MemoryContext execctx); |
286 | static bool array_element_has_equality(TypeCacheEntry *typentry); |
287 | static bool array_element_has_compare(TypeCacheEntry *typentry); |
288 | static bool array_element_has_hashing(TypeCacheEntry *typentry); |
289 | static bool array_element_has_extended_hashing(TypeCacheEntry *typentry); |
290 | static void cache_array_element_properties(TypeCacheEntry *typentry); |
291 | static bool record_fields_have_equality(TypeCacheEntry *typentry); |
292 | static bool record_fields_have_compare(TypeCacheEntry *typentry); |
293 | static void cache_record_field_properties(TypeCacheEntry *typentry); |
294 | static bool range_element_has_hashing(TypeCacheEntry *typentry); |
295 | static bool range_element_has_extended_hashing(TypeCacheEntry *typentry); |
296 | static void cache_range_element_properties(TypeCacheEntry *typentry); |
297 | static void TypeCacheRelCallback(Datum arg, Oid relid); |
298 | static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue); |
299 | static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue); |
300 | static void load_enum_cache_data(TypeCacheEntry *tcache); |
301 | static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg); |
302 | static int enum_oid_cmp(const void *left, const void *right); |
303 | static void shared_record_typmod_registry_detach(dsm_segment *segment, |
304 | Datum datum); |
305 | static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc); |
306 | static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, |
307 | uint32 typmod); |
308 | |
309 | |
310 | /* |
311 | * lookup_type_cache |
312 | * |
313 | * Fetch the type cache entry for the specified datatype, and make sure that |
314 | * all the fields requested by bits in 'flags' are valid. |
315 | * |
316 | * The result is never NULL --- we will ereport() if the passed type OID is |
317 | * invalid. Note however that we may fail to find one or more of the |
318 | * values requested by 'flags'; the caller needs to check whether the fields |
319 | * are InvalidOid or not. |
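 *
 * A typical call therefore looks something like this (an illustrative
 * sketch only, not taken from any particular caller):
 *
 *		typentry = lookup_type_cache(typid,
 *									 TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR);
 *		if (!OidIsValid(typentry->lt_opr))
 *			ereport(ERROR,
 *					(errcode(ERRCODE_UNDEFINED_FUNCTION),
 *					 errmsg("could not identify an ordering operator for type %s",
 *							format_type_be(typid))));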
320 | */ |
321 | TypeCacheEntry * |
322 | lookup_type_cache(Oid type_id, int flags) |
323 | { |
324 | TypeCacheEntry *typentry; |
325 | bool found; |
326 | |
327 | if (TypeCacheHash == NULL) |
328 | { |
329 | /* First time through: initialize the hash table */ |
330 | HASHCTL ctl; |
331 | |
332 | MemSet(&ctl, 0, sizeof(ctl)); |
333 | ctl.keysize = sizeof(Oid); |
334 | ctl.entrysize = sizeof(TypeCacheEntry); |
		TypeCacheHash = hash_create("Type information cache", 64,
									&ctl, HASH_ELEM | HASH_BLOBS);
337 | |
338 | /* Also set up callbacks for SI invalidations */ |
339 | CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0); |
340 | CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0); |
341 | CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0); |
342 | CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0); |
343 | |
344 | /* Also make sure CacheMemoryContext exists */ |
345 | if (!CacheMemoryContext) |
346 | CreateCacheMemoryContext(); |
347 | } |
348 | |
349 | /* Try to look up an existing entry */ |
350 | typentry = (TypeCacheEntry *) hash_search(TypeCacheHash, |
351 | (void *) &type_id, |
352 | HASH_FIND, NULL); |
353 | if (typentry == NULL) |
354 | { |
355 | /* |
356 | * If we didn't find one, we want to make one. But first look up the |
357 | * pg_type row, just to make sure we don't make a cache entry for an |
358 | * invalid type OID. If the type OID is not valid, present a |
359 | * user-facing error, since some code paths such as domain_in() allow |
360 | * this function to be reached with a user-supplied OID. |
361 | */ |
362 | HeapTuple tp; |
363 | Form_pg_type typtup; |
364 | |
365 | tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id)); |
		if (!HeapTupleIsValid(tp))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("type with OID %u does not exist", type_id)));
370 | typtup = (Form_pg_type) GETSTRUCT(tp); |
		if (!typtup->typisdefined)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("type \"%s\" is only a shell",
							NameStr(typtup->typname))));
376 | |
377 | /* Now make the typcache entry */ |
378 | typentry = (TypeCacheEntry *) hash_search(TypeCacheHash, |
379 | (void *) &type_id, |
380 | HASH_ENTER, &found); |
381 | Assert(!found); /* it wasn't there a moment ago */ |
382 | |
383 | MemSet(typentry, 0, sizeof(TypeCacheEntry)); |
384 | typentry->type_id = type_id; |
385 | typentry->typlen = typtup->typlen; |
386 | typentry->typbyval = typtup->typbyval; |
387 | typentry->typalign = typtup->typalign; |
388 | typentry->typstorage = typtup->typstorage; |
389 | typentry->typtype = typtup->typtype; |
390 | typentry->typrelid = typtup->typrelid; |
391 | typentry->typelem = typtup->typelem; |
392 | typentry->typcollation = typtup->typcollation; |
393 | |
394 | /* If it's a domain, immediately thread it into the domain cache list */ |
395 | if (typentry->typtype == TYPTYPE_DOMAIN) |
396 | { |
397 | typentry->nextDomain = firstDomainTypeEntry; |
398 | firstDomainTypeEntry = typentry; |
399 | } |
400 | |
401 | ReleaseSysCache(tp); |
402 | } |
403 | |
404 | /* |
405 | * Look up opclasses if we haven't already and any dependent info is |
406 | * requested. |
407 | */ |
408 | if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR | |
409 | TYPECACHE_CMP_PROC | |
410 | TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO | |
411 | TYPECACHE_BTREE_OPFAMILY)) && |
412 | !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS)) |
413 | { |
414 | Oid opclass; |
415 | |
416 | opclass = GetDefaultOpClass(type_id, BTREE_AM_OID); |
417 | if (OidIsValid(opclass)) |
418 | { |
419 | typentry->btree_opf = get_opclass_family(opclass); |
420 | typentry->btree_opintype = get_opclass_input_type(opclass); |
421 | } |
422 | else |
423 | { |
424 | typentry->btree_opf = typentry->btree_opintype = InvalidOid; |
425 | } |
426 | |
427 | /* |
428 | * Reset information derived from btree opclass. Note in particular |
429 | * that we'll redetermine the eq_opr even if we previously found one; |
430 | * this matters in case a btree opclass has been added to a type that |
431 | * previously had only a hash opclass. |
432 | */ |
433 | typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR | |
434 | TCFLAGS_CHECKED_LT_OPR | |
435 | TCFLAGS_CHECKED_GT_OPR | |
436 | TCFLAGS_CHECKED_CMP_PROC); |
437 | typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS; |
438 | } |
439 | |
440 | /* |
441 | * If we need to look up equality operator, and there's no btree opclass, |
442 | * force lookup of hash opclass. |
443 | */ |
444 | if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) && |
445 | !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) && |
446 | typentry->btree_opf == InvalidOid) |
447 | flags |= TYPECACHE_HASH_OPFAMILY; |
448 | |
449 | if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO | |
450 | TYPECACHE_HASH_EXTENDED_PROC | |
451 | TYPECACHE_HASH_EXTENDED_PROC_FINFO | |
452 | TYPECACHE_HASH_OPFAMILY)) && |
453 | !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS)) |
454 | { |
455 | Oid opclass; |
456 | |
457 | opclass = GetDefaultOpClass(type_id, HASH_AM_OID); |
458 | if (OidIsValid(opclass)) |
459 | { |
460 | typentry->hash_opf = get_opclass_family(opclass); |
461 | typentry->hash_opintype = get_opclass_input_type(opclass); |
462 | } |
463 | else |
464 | { |
465 | typentry->hash_opf = typentry->hash_opintype = InvalidOid; |
466 | } |
467 | |
468 | /* |
469 | * Reset information derived from hash opclass. We do *not* reset the |
470 | * eq_opr; if we already found one from the btree opclass, that |
471 | * decision is still good. |
472 | */ |
473 | typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC | |
474 | TCFLAGS_CHECKED_HASH_EXTENDED_PROC); |
475 | typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS; |
476 | } |
477 | |
478 | /* |
479 | * Look for requested operators and functions, if we haven't already. |
480 | */ |
481 | if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) && |
482 | !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR)) |
483 | { |
484 | Oid eq_opr = InvalidOid; |
485 | |
486 | if (typentry->btree_opf != InvalidOid) |
487 | eq_opr = get_opfamily_member(typentry->btree_opf, |
488 | typentry->btree_opintype, |
489 | typentry->btree_opintype, |
490 | BTEqualStrategyNumber); |
491 | if (eq_opr == InvalidOid && |
492 | typentry->hash_opf != InvalidOid) |
493 | eq_opr = get_opfamily_member(typentry->hash_opf, |
494 | typentry->hash_opintype, |
495 | typentry->hash_opintype, |
496 | HTEqualStrategyNumber); |
497 | |
498 | /* |
499 | * If the proposed equality operator is array_eq or record_eq, check |
500 | * to see if the element type or column types support equality. If |
501 | * not, array_eq or record_eq would fail at runtime, so we don't want |
502 | * to report that the type has equality. (We can omit similar |
503 | * checking for ranges because ranges can't be created in the first |
504 | * place unless their subtypes support equality.) |
505 | */ |
506 | if (eq_opr == ARRAY_EQ_OP && |
507 | !array_element_has_equality(typentry)) |
508 | eq_opr = InvalidOid; |
509 | else if (eq_opr == RECORD_EQ_OP && |
510 | !record_fields_have_equality(typentry)) |
511 | eq_opr = InvalidOid; |
512 | |
513 | /* Force update of eq_opr_finfo only if we're changing state */ |
514 | if (typentry->eq_opr != eq_opr) |
515 | typentry->eq_opr_finfo.fn_oid = InvalidOid; |
516 | |
517 | typentry->eq_opr = eq_opr; |
518 | |
519 | /* |
520 | * Reset info about hash functions whenever we pick up new info about |
521 | * equality operator. This is so we can ensure that the hash |
522 | * functions match the operator. |
523 | */ |
524 | typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC | |
525 | TCFLAGS_CHECKED_HASH_EXTENDED_PROC); |
526 | typentry->flags |= TCFLAGS_CHECKED_EQ_OPR; |
527 | } |
528 | if ((flags & TYPECACHE_LT_OPR) && |
529 | !(typentry->flags & TCFLAGS_CHECKED_LT_OPR)) |
530 | { |
531 | Oid lt_opr = InvalidOid; |
532 | |
533 | if (typentry->btree_opf != InvalidOid) |
534 | lt_opr = get_opfamily_member(typentry->btree_opf, |
535 | typentry->btree_opintype, |
536 | typentry->btree_opintype, |
537 | BTLessStrategyNumber); |
538 | |
539 | /* |
540 | * As above, make sure array_cmp or record_cmp will succeed; but again |
541 | * we need no special check for ranges. |
542 | */ |
543 | if (lt_opr == ARRAY_LT_OP && |
544 | !array_element_has_compare(typentry)) |
545 | lt_opr = InvalidOid; |
546 | else if (lt_opr == RECORD_LT_OP && |
547 | !record_fields_have_compare(typentry)) |
548 | lt_opr = InvalidOid; |
549 | |
550 | typentry->lt_opr = lt_opr; |
551 | typentry->flags |= TCFLAGS_CHECKED_LT_OPR; |
552 | } |
553 | if ((flags & TYPECACHE_GT_OPR) && |
554 | !(typentry->flags & TCFLAGS_CHECKED_GT_OPR)) |
555 | { |
556 | Oid gt_opr = InvalidOid; |
557 | |
558 | if (typentry->btree_opf != InvalidOid) |
559 | gt_opr = get_opfamily_member(typentry->btree_opf, |
560 | typentry->btree_opintype, |
561 | typentry->btree_opintype, |
562 | BTGreaterStrategyNumber); |
563 | |
564 | /* |
565 | * As above, make sure array_cmp or record_cmp will succeed; but again |
566 | * we need no special check for ranges. |
567 | */ |
568 | if (gt_opr == ARRAY_GT_OP && |
569 | !array_element_has_compare(typentry)) |
570 | gt_opr = InvalidOid; |
571 | else if (gt_opr == RECORD_GT_OP && |
572 | !record_fields_have_compare(typentry)) |
573 | gt_opr = InvalidOid; |
574 | |
575 | typentry->gt_opr = gt_opr; |
576 | typentry->flags |= TCFLAGS_CHECKED_GT_OPR; |
577 | } |
578 | if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) && |
579 | !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC)) |
580 | { |
581 | Oid cmp_proc = InvalidOid; |
582 | |
583 | if (typentry->btree_opf != InvalidOid) |
584 | cmp_proc = get_opfamily_proc(typentry->btree_opf, |
585 | typentry->btree_opintype, |
586 | typentry->btree_opintype, |
587 | BTORDER_PROC); |
588 | |
589 | /* |
590 | * As above, make sure array_cmp or record_cmp will succeed; but again |
591 | * we need no special check for ranges. |
592 | */ |
593 | if (cmp_proc == F_BTARRAYCMP && |
594 | !array_element_has_compare(typentry)) |
595 | cmp_proc = InvalidOid; |
596 | else if (cmp_proc == F_BTRECORDCMP && |
597 | !record_fields_have_compare(typentry)) |
598 | cmp_proc = InvalidOid; |
599 | |
600 | /* Force update of cmp_proc_finfo only if we're changing state */ |
601 | if (typentry->cmp_proc != cmp_proc) |
602 | typentry->cmp_proc_finfo.fn_oid = InvalidOid; |
603 | |
604 | typentry->cmp_proc = cmp_proc; |
605 | typentry->flags |= TCFLAGS_CHECKED_CMP_PROC; |
606 | } |
607 | if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) && |
608 | !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC)) |
609 | { |
610 | Oid hash_proc = InvalidOid; |
611 | |
612 | /* |
613 | * We insist that the eq_opr, if one has been determined, match the |
614 | * hash opclass; else report there is no hash function. |
615 | */ |
616 | if (typentry->hash_opf != InvalidOid && |
617 | (!OidIsValid(typentry->eq_opr) || |
618 | typentry->eq_opr == get_opfamily_member(typentry->hash_opf, |
619 | typentry->hash_opintype, |
620 | typentry->hash_opintype, |
621 | HTEqualStrategyNumber))) |
622 | hash_proc = get_opfamily_proc(typentry->hash_opf, |
623 | typentry->hash_opintype, |
624 | typentry->hash_opintype, |
625 | HASHSTANDARD_PROC); |
626 | |
627 | /* |
628 | * As above, make sure hash_array will succeed. We don't currently |
629 | * support hashing for composite types, but when we do, we'll need |
630 | * more logic here to check that case too. |
631 | */ |
632 | if (hash_proc == F_HASH_ARRAY && |
633 | !array_element_has_hashing(typentry)) |
634 | hash_proc = InvalidOid; |
635 | |
636 | /* |
637 | * Likewise for hash_range. |
638 | */ |
639 | if (hash_proc == F_HASH_RANGE && |
640 | !range_element_has_hashing(typentry)) |
641 | hash_proc = InvalidOid; |
642 | |
643 | /* Force update of hash_proc_finfo only if we're changing state */ |
644 | if (typentry->hash_proc != hash_proc) |
645 | typentry->hash_proc_finfo.fn_oid = InvalidOid; |
646 | |
647 | typentry->hash_proc = hash_proc; |
648 | typentry->flags |= TCFLAGS_CHECKED_HASH_PROC; |
649 | } |
650 | if ((flags & (TYPECACHE_HASH_EXTENDED_PROC | |
651 | TYPECACHE_HASH_EXTENDED_PROC_FINFO)) && |
652 | !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC)) |
653 | { |
654 | Oid hash_extended_proc = InvalidOid; |
655 | |
656 | /* |
657 | * We insist that the eq_opr, if one has been determined, match the |
658 | * hash opclass; else report there is no hash function. |
659 | */ |
660 | if (typentry->hash_opf != InvalidOid && |
661 | (!OidIsValid(typentry->eq_opr) || |
662 | typentry->eq_opr == get_opfamily_member(typentry->hash_opf, |
663 | typentry->hash_opintype, |
664 | typentry->hash_opintype, |
665 | HTEqualStrategyNumber))) |
666 | hash_extended_proc = get_opfamily_proc(typentry->hash_opf, |
667 | typentry->hash_opintype, |
668 | typentry->hash_opintype, |
669 | HASHEXTENDED_PROC); |
670 | |
671 | /* |
672 | * As above, make sure hash_array_extended will succeed. We don't |
673 | * currently support hashing for composite types, but when we do, |
674 | * we'll need more logic here to check that case too. |
675 | */ |
676 | if (hash_extended_proc == F_HASH_ARRAY_EXTENDED && |
677 | !array_element_has_extended_hashing(typentry)) |
678 | hash_extended_proc = InvalidOid; |
679 | |
680 | /* |
681 | * Likewise for hash_range_extended. |
682 | */ |
683 | if (hash_extended_proc == F_HASH_RANGE_EXTENDED && |
684 | !range_element_has_extended_hashing(typentry)) |
685 | hash_extended_proc = InvalidOid; |
686 | |
687 | /* Force update of proc finfo only if we're changing state */ |
688 | if (typentry->hash_extended_proc != hash_extended_proc) |
689 | typentry->hash_extended_proc_finfo.fn_oid = InvalidOid; |
690 | |
691 | typentry->hash_extended_proc = hash_extended_proc; |
692 | typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC; |
693 | } |
694 | |
695 | /* |
696 | * Set up fmgr lookup info as requested |
697 | * |
698 | * Note: we tell fmgr the finfo structures live in CacheMemoryContext, |
699 | * which is not quite right (they're really in the hash table's private |
700 | * memory context) but this will do for our purposes. |
701 | * |
702 | * Note: the code above avoids invalidating the finfo structs unless the |
703 | * referenced operator/function OID actually changes. This is to prevent |
704 | * unnecessary leakage of any subsidiary data attached to an finfo, since |
705 | * that would cause session-lifespan memory leaks. |
706 | */ |
707 | if ((flags & TYPECACHE_EQ_OPR_FINFO) && |
708 | typentry->eq_opr_finfo.fn_oid == InvalidOid && |
709 | typentry->eq_opr != InvalidOid) |
710 | { |
711 | Oid eq_opr_func; |
712 | |
713 | eq_opr_func = get_opcode(typentry->eq_opr); |
714 | if (eq_opr_func != InvalidOid) |
715 | fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo, |
716 | CacheMemoryContext); |
717 | } |
718 | if ((flags & TYPECACHE_CMP_PROC_FINFO) && |
719 | typentry->cmp_proc_finfo.fn_oid == InvalidOid && |
720 | typentry->cmp_proc != InvalidOid) |
721 | { |
722 | fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo, |
723 | CacheMemoryContext); |
724 | } |
725 | if ((flags & TYPECACHE_HASH_PROC_FINFO) && |
726 | typentry->hash_proc_finfo.fn_oid == InvalidOid && |
727 | typentry->hash_proc != InvalidOid) |
728 | { |
729 | fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo, |
730 | CacheMemoryContext); |
731 | } |
732 | if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) && |
733 | typentry->hash_extended_proc_finfo.fn_oid == InvalidOid && |
734 | typentry->hash_extended_proc != InvalidOid) |
735 | { |
736 | fmgr_info_cxt(typentry->hash_extended_proc, |
737 | &typentry->hash_extended_proc_finfo, |
738 | CacheMemoryContext); |
739 | } |
740 | |
741 | /* |
742 | * If it's a composite type (row type), get tupdesc if requested |
743 | */ |
744 | if ((flags & TYPECACHE_TUPDESC) && |
745 | typentry->tupDesc == NULL && |
746 | typentry->typtype == TYPTYPE_COMPOSITE) |
747 | { |
748 | load_typcache_tupdesc(typentry); |
749 | } |
750 | |
751 | /* |
752 | * If requested, get information about a range type |
753 | */ |
754 | if ((flags & TYPECACHE_RANGE_INFO) && |
755 | typentry->rngelemtype == NULL && |
756 | typentry->typtype == TYPTYPE_RANGE) |
757 | { |
758 | load_rangetype_info(typentry); |
759 | } |
760 | |
761 | /* |
762 | * If requested, get information about a domain type |
763 | */ |
764 | if ((flags & TYPECACHE_DOMAIN_BASE_INFO) && |
765 | typentry->domainBaseType == InvalidOid && |
766 | typentry->typtype == TYPTYPE_DOMAIN) |
767 | { |
768 | typentry->domainBaseTypmod = -1; |
769 | typentry->domainBaseType = |
770 | getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod); |
771 | } |
772 | if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) && |
773 | (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 && |
774 | typentry->typtype == TYPTYPE_DOMAIN) |
775 | { |
776 | load_domaintype_info(typentry); |
777 | } |
778 | |
779 | return typentry; |
780 | } |
781 | |
782 | /* |
783 | * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc |
784 | */ |
785 | static void |
786 | load_typcache_tupdesc(TypeCacheEntry *typentry) |
787 | { |
788 | Relation rel; |
789 | |
	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
793 | rel = relation_open(typentry->typrelid, AccessShareLock); |
794 | Assert(rel->rd_rel->reltype == typentry->type_id); |
795 | |
796 | /* |
797 | * Link to the tupdesc and increment its refcount (we assert it's a |
798 | * refcounted descriptor). We don't use IncrTupleDescRefCount() for this, |
799 | * because the reference mustn't be entered in the current resource owner; |
800 | * it can outlive the current query. |
801 | */ |
802 | typentry->tupDesc = RelationGetDescr(rel); |
803 | |
804 | Assert(typentry->tupDesc->tdrefcount > 0); |
805 | typentry->tupDesc->tdrefcount++; |
806 | |
807 | /* |
808 | * In future, we could take some pains to not change tupDesc_identifier if |
809 | * the tupdesc didn't really change; but for now it's not worth it. |
810 | */ |
811 | typentry->tupDesc_identifier = ++tupledesc_id_counter; |
812 | |
813 | relation_close(rel, AccessShareLock); |
814 | } |
815 | |
816 | /* |
817 | * load_rangetype_info --- helper routine to set up range type information |
818 | */ |
819 | static void |
820 | load_rangetype_info(TypeCacheEntry *typentry) |
821 | { |
822 | Form_pg_range pg_range; |
823 | HeapTuple tup; |
824 | Oid subtypeOid; |
825 | Oid opclassOid; |
826 | Oid canonicalOid; |
827 | Oid subdiffOid; |
828 | Oid opfamilyOid; |
829 | Oid opcintype; |
830 | Oid cmpFnOid; |
831 | |
832 | /* get information from pg_range */ |
833 | tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id)); |
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);
838 | pg_range = (Form_pg_range) GETSTRUCT(tup); |
839 | |
840 | subtypeOid = pg_range->rngsubtype; |
841 | typentry->rng_collation = pg_range->rngcollation; |
842 | opclassOid = pg_range->rngsubopc; |
843 | canonicalOid = pg_range->rngcanonical; |
844 | subdiffOid = pg_range->rngsubdiff; |
845 | |
846 | ReleaseSysCache(tup); |
847 | |
848 | /* get opclass properties and look up the comparison function */ |
849 | opfamilyOid = get_opclass_family(opclassOid); |
850 | opcintype = get_opclass_input_type(opclassOid); |
851 | |
852 | cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype, |
853 | BTORDER_PROC); |
	if (!RegProcedureIsValid(cmpFnOid))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
857 | |
858 | /* set up cached fmgrinfo structs */ |
859 | fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo, |
860 | CacheMemoryContext); |
861 | if (OidIsValid(canonicalOid)) |
862 | fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo, |
863 | CacheMemoryContext); |
864 | if (OidIsValid(subdiffOid)) |
865 | fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo, |
866 | CacheMemoryContext); |
867 | |
868 | /* Lastly, set up link to the element type --- this marks data valid */ |
869 | typentry->rngelemtype = lookup_type_cache(subtypeOid, 0); |
870 | } |
871 | |
872 | |
873 | /* |
874 | * load_domaintype_info --- helper routine to set up domain constraint info |
875 | * |
876 | * Note: we assume we're called in a relatively short-lived context, so it's |
877 | * okay to leak data into the current context while scanning pg_constraint. |
878 | * We build the new DomainConstraintCache data in a context underneath |
879 | * CurrentMemoryContext, and reparent it under CacheMemoryContext when |
880 | * complete. |
881 | */ |
882 | static void |
883 | load_domaintype_info(TypeCacheEntry *typentry) |
884 | { |
885 | Oid typeOid = typentry->type_id; |
886 | DomainConstraintCache *dcc; |
887 | bool notNull = false; |
888 | DomainConstraintState **ccons; |
889 | int cconslen; |
890 | Relation conRel; |
891 | MemoryContext oldcxt; |
892 | |
893 | /* |
894 | * If we're here, any existing constraint info is stale, so release it. |
895 | * For safety, be sure to null the link before trying to delete the data. |
896 | */ |
897 | if (typentry->domainData) |
898 | { |
899 | dcc = typentry->domainData; |
900 | typentry->domainData = NULL; |
901 | decr_dcc_refcount(dcc); |
902 | } |
903 | |
904 | /* |
905 | * We try to optimize the common case of no domain constraints, so don't |
906 | * create the dcc object and context until we find a constraint. Likewise |
907 | * for the temp sorting array. |
908 | */ |
909 | dcc = NULL; |
910 | ccons = NULL; |
911 | cconslen = 0; |
912 | |
913 | /* |
914 | * Scan pg_constraint for relevant constraints. We want to find |
915 | * constraints for not just this domain, but any ancestor domains, so the |
916 | * outer loop crawls up the domain stack. |
917 | */ |
918 | conRel = table_open(ConstraintRelationId, AccessShareLock); |
919 | |
920 | for (;;) |
921 | { |
922 | HeapTuple tup; |
923 | HeapTuple conTup; |
924 | Form_pg_type typTup; |
925 | int nccons = 0; |
926 | ScanKeyData key[1]; |
927 | SysScanDesc scan; |
928 | |
		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);
932 | typTup = (Form_pg_type) GETSTRUCT(tup); |
933 | |
934 | if (typTup->typtype != TYPTYPE_DOMAIN) |
935 | { |
936 | /* Not a domain, so done */ |
937 | ReleaseSysCache(tup); |
938 | break; |
939 | } |
940 | |
941 | /* Test for NOT NULL Constraint */ |
942 | if (typTup->typnotnull) |
943 | notNull = true; |
944 | |
945 | /* Look for CHECK Constraints on this domain */ |
946 | ScanKeyInit(&key[0], |
947 | Anum_pg_constraint_contypid, |
948 | BTEqualStrategyNumber, F_OIDEQ, |
949 | ObjectIdGetDatum(typeOid)); |
950 | |
951 | scan = systable_beginscan(conRel, ConstraintTypidIndexId, true, |
952 | NULL, 1, key); |
953 | |
954 | while (HeapTupleIsValid(conTup = systable_getnext(scan))) |
955 | { |
956 | Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup); |
957 | Datum val; |
958 | bool isNull; |
959 | char *constring; |
960 | Expr *check_expr; |
961 | DomainConstraintState *r; |
962 | |
963 | /* Ignore non-CHECK constraints (presently, shouldn't be any) */ |
964 | if (c->contype != CONSTRAINT_CHECK) |
965 | continue; |
966 | |
967 | /* Not expecting conbin to be NULL, but we'll test for it anyway */ |
968 | val = fastgetattr(conTup, Anum_pg_constraint_conbin, |
969 | conRel->rd_att, &isNull); |
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));
973 | |
974 | /* Convert conbin to C string in caller context */ |
975 | constring = TextDatumGetCString(val); |
976 | |
977 | /* Create the DomainConstraintCache object and context if needed */ |
978 | if (dcc == NULL) |
979 | { |
980 | MemoryContext cxt; |
981 | |
				cxt = AllocSetContextCreate(CurrentMemoryContext,
											"Domain constraints",
											ALLOCSET_SMALL_SIZES);
985 | dcc = (DomainConstraintCache *) |
986 | MemoryContextAlloc(cxt, sizeof(DomainConstraintCache)); |
987 | dcc->constraints = NIL; |
988 | dcc->dccContext = cxt; |
989 | dcc->dccRefCount = 0; |
990 | } |
991 | |
992 | /* Create node trees in DomainConstraintCache's context */ |
993 | oldcxt = MemoryContextSwitchTo(dcc->dccContext); |
994 | |
995 | check_expr = (Expr *) stringToNode(constring); |
996 | |
997 | /* |
998 | * Plan the expression, since ExecInitExpr will expect that. |
999 | * |
1000 | * Note: caching the result of expression_planner() is not very |
1001 | * good practice. Ideally we'd use a CachedExpression here so |
1002 | * that we would react promptly to, eg, changes in inlined |
1003 | * functions. However, because we don't support mutable domain |
1004 | * CHECK constraints, it's not really clear that it's worth the |
1005 | * extra overhead to do that. |
1006 | */ |
1007 | check_expr = expression_planner(check_expr); |
1008 | |
1009 | r = makeNode(DomainConstraintState); |
1010 | r->constrainttype = DOM_CONSTRAINT_CHECK; |
1011 | r->name = pstrdup(NameStr(c->conname)); |
1012 | r->check_expr = check_expr; |
1013 | r->check_exprstate = NULL; |
1014 | |
1015 | MemoryContextSwitchTo(oldcxt); |
1016 | |
1017 | /* Accumulate constraints in an array, for sorting below */ |
1018 | if (ccons == NULL) |
1019 | { |
1020 | cconslen = 8; |
1021 | ccons = (DomainConstraintState **) |
1022 | palloc(cconslen * sizeof(DomainConstraintState *)); |
1023 | } |
1024 | else if (nccons >= cconslen) |
1025 | { |
1026 | cconslen *= 2; |
1027 | ccons = (DomainConstraintState **) |
1028 | repalloc(ccons, cconslen * sizeof(DomainConstraintState *)); |
1029 | } |
1030 | ccons[nccons++] = r; |
1031 | } |
1032 | |
1033 | systable_endscan(scan); |
1034 | |
1035 | if (nccons > 0) |
1036 | { |
1037 | /* |
1038 | * Sort the items for this domain, so that CHECKs are applied in a |
1039 | * deterministic order. |
1040 | */ |
1041 | if (nccons > 1) |
1042 | qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp); |
1043 | |
1044 | /* |
1045 | * Now attach them to the overall list. Use lcons() here because |
1046 | * constraints of parent domains should be applied earlier. |
1047 | */ |
1048 | oldcxt = MemoryContextSwitchTo(dcc->dccContext); |
1049 | while (nccons > 0) |
1050 | dcc->constraints = lcons(ccons[--nccons], dcc->constraints); |
1051 | MemoryContextSwitchTo(oldcxt); |
1052 | } |
1053 | |
1054 | /* loop to next domain in stack */ |
1055 | typeOid = typTup->typbasetype; |
1056 | ReleaseSysCache(tup); |
1057 | } |
1058 | |
1059 | table_close(conRel, AccessShareLock); |
1060 | |
1061 | /* |
1062 | * Only need to add one NOT NULL check regardless of how many domains in |
1063 | * the stack request it. |
1064 | */ |
1065 | if (notNull) |
1066 | { |
1067 | DomainConstraintState *r; |
1068 | |
1069 | /* Create the DomainConstraintCache object and context if needed */ |
1070 | if (dcc == NULL) |
1071 | { |
1072 | MemoryContext cxt; |
1073 | |
			cxt = AllocSetContextCreate(CurrentMemoryContext,
										"Domain constraints",
										ALLOCSET_SMALL_SIZES);
1077 | dcc = (DomainConstraintCache *) |
1078 | MemoryContextAlloc(cxt, sizeof(DomainConstraintCache)); |
1079 | dcc->constraints = NIL; |
1080 | dcc->dccContext = cxt; |
1081 | dcc->dccRefCount = 0; |
1082 | } |
1083 | |
1084 | /* Create node trees in DomainConstraintCache's context */ |
1085 | oldcxt = MemoryContextSwitchTo(dcc->dccContext); |
1086 | |
1087 | r = makeNode(DomainConstraintState); |
1088 | |
1089 | r->constrainttype = DOM_CONSTRAINT_NOTNULL; |
		r->name = pstrdup("NOT NULL");
1091 | r->check_expr = NULL; |
1092 | r->check_exprstate = NULL; |
1093 | |
1094 | /* lcons to apply the nullness check FIRST */ |
1095 | dcc->constraints = lcons(r, dcc->constraints); |
1096 | |
1097 | MemoryContextSwitchTo(oldcxt); |
1098 | } |
1099 | |
1100 | /* |
1101 | * If we made a constraint object, move it into CacheMemoryContext and |
1102 | * attach it to the typcache entry. |
1103 | */ |
1104 | if (dcc) |
1105 | { |
1106 | MemoryContextSetParent(dcc->dccContext, CacheMemoryContext); |
1107 | typentry->domainData = dcc; |
1108 | dcc->dccRefCount++; /* count the typcache's reference */ |
1109 | } |
1110 | |
1111 | /* Either way, the typcache entry's domain data is now valid. */ |
1112 | typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS; |
1113 | } |
1114 | |
1115 | /* |
1116 | * qsort comparator to sort DomainConstraintState pointers by name |
1117 | */ |
1118 | static int |
1119 | dcs_cmp(const void *a, const void *b) |
1120 | { |
1121 | const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a; |
1122 | const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b; |
1123 | |
1124 | return strcmp((*ca)->name, (*cb)->name); |
1125 | } |
1126 | |
1127 | /* |
1128 | * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount, |
1129 | * and free it if no references remain |
1130 | */ |
1131 | static void |
1132 | decr_dcc_refcount(DomainConstraintCache *dcc) |
1133 | { |
1134 | Assert(dcc->dccRefCount > 0); |
1135 | if (--(dcc->dccRefCount) <= 0) |
1136 | MemoryContextDelete(dcc->dccContext); |
1137 | } |
1138 | |
1139 | /* |
1140 | * Context reset/delete callback for a DomainConstraintRef |
1141 | */ |
1142 | static void |
1143 | dccref_deletion_callback(void *arg) |
1144 | { |
1145 | DomainConstraintRef *ref = (DomainConstraintRef *) arg; |
1146 | DomainConstraintCache *dcc = ref->dcc; |
1147 | |
1148 | /* Paranoia --- be sure link is nulled before trying to release */ |
1149 | if (dcc) |
1150 | { |
1151 | ref->constraints = NIL; |
1152 | ref->dcc = NULL; |
1153 | decr_dcc_refcount(dcc); |
1154 | } |
1155 | } |
1156 | |
1157 | /* |
1158 | * prep_domain_constraints --- prepare domain constraints for execution |
1159 | * |
1160 | * The expression trees stored in the DomainConstraintCache's list are |
1161 | * converted to executable expression state trees stored in execctx. |
1162 | */ |
1163 | static List * |
1164 | prep_domain_constraints(List *constraints, MemoryContext execctx) |
1165 | { |
1166 | List *result = NIL; |
1167 | MemoryContext oldcxt; |
1168 | ListCell *lc; |
1169 | |
1170 | oldcxt = MemoryContextSwitchTo(execctx); |
1171 | |
1172 | foreach(lc, constraints) |
1173 | { |
1174 | DomainConstraintState *r = (DomainConstraintState *) lfirst(lc); |
1175 | DomainConstraintState *newr; |
1176 | |
1177 | newr = makeNode(DomainConstraintState); |
1178 | newr->constrainttype = r->constrainttype; |
1179 | newr->name = r->name; |
1180 | newr->check_expr = r->check_expr; |
1181 | newr->check_exprstate = ExecInitExpr(r->check_expr, NULL); |
1182 | |
1183 | result = lappend(result, newr); |
1184 | } |
1185 | |
1186 | MemoryContextSwitchTo(oldcxt); |
1187 | |
1188 | return result; |
1189 | } |
1190 | |
1191 | /* |
1192 | * InitDomainConstraintRef --- initialize a DomainConstraintRef struct |
1193 | * |
1194 | * Caller must tell us the MemoryContext in which the DomainConstraintRef |
1195 | * lives. The ref will be cleaned up when that context is reset/deleted. |
1196 | * |
1197 | * Caller must also tell us whether it wants check_exprstate fields to be |
1198 | * computed in the DomainConstraintState nodes attached to this ref. |
1199 | * If it doesn't, we need not make a copy of the DomainConstraintState list. |
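 *
 * The expected calling pattern, sketched loosely after existing callers such
 * as domain_check() (the names my_ref and my_cxt are only illustrative), is:
 *
 *		InitDomainConstraintRef(domaintype, &my_ref, my_cxt, true);
 *		...
 *		UpdateDomainConstraintRef(&my_ref);		(before each use)
 *		foreach(lc, my_ref.constraints)
 *		{
 *			DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);
 *
 *			... evaluate con->check_exprstate against the value being checked ...
 *		}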
1200 | */ |
1201 | void |
1202 | InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, |
1203 | MemoryContext refctx, bool need_exprstate) |
1204 | { |
1205 | /* Look up the typcache entry --- we assume it survives indefinitely */ |
1206 | ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO); |
1207 | ref->need_exprstate = need_exprstate; |
1208 | /* For safety, establish the callback before acquiring a refcount */ |
1209 | ref->refctx = refctx; |
1210 | ref->dcc = NULL; |
1211 | ref->callback.func = dccref_deletion_callback; |
1212 | ref->callback.arg = (void *) ref; |
1213 | MemoryContextRegisterResetCallback(refctx, &ref->callback); |
1214 | /* Acquire refcount if there are constraints, and set up exported list */ |
1215 | if (ref->tcache->domainData) |
1216 | { |
1217 | ref->dcc = ref->tcache->domainData; |
1218 | ref->dcc->dccRefCount++; |
1219 | if (ref->need_exprstate) |
1220 | ref->constraints = prep_domain_constraints(ref->dcc->constraints, |
1221 | ref->refctx); |
1222 | else |
1223 | ref->constraints = ref->dcc->constraints; |
1224 | } |
1225 | else |
1226 | ref->constraints = NIL; |
1227 | } |
1228 | |
1229 | /* |
1230 | * UpdateDomainConstraintRef --- recheck validity of domain constraint info |
1231 | * |
1232 | * If the domain's constraint set changed, ref->constraints is updated to |
1233 | * point at a new list of cached constraints. |
1234 | * |
1235 | * In the normal case where nothing happened to the domain, this is cheap |
1236 | * enough that it's reasonable (and expected) to check before *each* use |
1237 | * of the constraint info. |
1238 | */ |
1239 | void |
1240 | UpdateDomainConstraintRef(DomainConstraintRef *ref) |
1241 | { |
1242 | TypeCacheEntry *typentry = ref->tcache; |
1243 | |
1244 | /* Make sure typcache entry's data is up to date */ |
1245 | if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 && |
1246 | typentry->typtype == TYPTYPE_DOMAIN) |
1247 | load_domaintype_info(typentry); |
1248 | |
1249 | /* Transfer to ref object if there's new info, adjusting refcounts */ |
1250 | if (ref->dcc != typentry->domainData) |
1251 | { |
1252 | /* Paranoia --- be sure link is nulled before trying to release */ |
1253 | DomainConstraintCache *dcc = ref->dcc; |
1254 | |
1255 | if (dcc) |
1256 | { |
1257 | /* |
1258 | * Note: we just leak the previous list of executable domain |
1259 | * constraints. Alternatively, we could keep those in a child |
1260 | * context of ref->refctx and free that context at this point. |
1261 | * However, in practice this code path will be taken so seldom |
1262 | * that the extra bookkeeping for a child context doesn't seem |
1263 | * worthwhile; we'll just allow a leak for the lifespan of refctx. |
1264 | */ |
1265 | ref->constraints = NIL; |
1266 | ref->dcc = NULL; |
1267 | decr_dcc_refcount(dcc); |
1268 | } |
1269 | dcc = typentry->domainData; |
1270 | if (dcc) |
1271 | { |
1272 | ref->dcc = dcc; |
1273 | dcc->dccRefCount++; |
1274 | if (ref->need_exprstate) |
1275 | ref->constraints = prep_domain_constraints(dcc->constraints, |
1276 | ref->refctx); |
1277 | else |
1278 | ref->constraints = dcc->constraints; |
1279 | } |
1280 | } |
1281 | } |
1282 | |
1283 | /* |
1284 | * DomainHasConstraints --- utility routine to check if a domain has constraints |
1285 | * |
1286 | * This is defined to return false, not fail, if type is not a domain. |
1287 | */ |
1288 | bool |
1289 | DomainHasConstraints(Oid type_id) |
1290 | { |
1291 | TypeCacheEntry *typentry; |
1292 | |
1293 | /* |
1294 | * Note: a side effect is to cause the typcache's domain data to become |
1295 | * valid. This is fine since we'll likely need it soon if there is any. |
1296 | */ |
1297 | typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO); |
1298 | |
1299 | return (typentry->domainData != NULL); |
1300 | } |
1301 | |
1302 | |
1303 | /* |
1304 | * array_element_has_equality and friends are helper routines to check |
1305 | * whether we should believe that array_eq and related functions will work |
1306 | * on the given array type or composite type. |
1307 | * |
1308 | * The logic above may call these repeatedly on the same type entry, so we |
1309 | * make use of the typentry->flags field to cache the results once known. |
1310 | * Also, we assume that we'll probably want all these facts about the type |
1311 | * if we want any, so we cache them all using only one lookup of the |
1312 | * component datatype(s). |
1313 | */ |
1314 | |
1315 | static bool |
1316 | array_element_has_equality(TypeCacheEntry *typentry) |
1317 | { |
1318 | if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES)) |
1319 | cache_array_element_properties(typentry); |
1320 | return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0; |
1321 | } |
1322 | |
1323 | static bool |
1324 | array_element_has_compare(TypeCacheEntry *typentry) |
1325 | { |
1326 | if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES)) |
1327 | cache_array_element_properties(typentry); |
1328 | return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0; |
1329 | } |
1330 | |
1331 | static bool |
1332 | array_element_has_hashing(TypeCacheEntry *typentry) |
1333 | { |
1334 | if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES)) |
1335 | cache_array_element_properties(typentry); |
1336 | return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0; |
1337 | } |
1338 | |
1339 | static bool |
1340 | array_element_has_extended_hashing(TypeCacheEntry *typentry) |
1341 | { |
1342 | if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES)) |
1343 | cache_array_element_properties(typentry); |
1344 | return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0; |
1345 | } |
1346 | |
1347 | static void |
1348 | cache_array_element_properties(TypeCacheEntry *typentry) |
1349 | { |
1350 | Oid elem_type = get_base_element_type(typentry->type_id); |
1351 | |
1352 | if (OidIsValid(elem_type)) |
1353 | { |
1354 | TypeCacheEntry *elementry; |
1355 | |
1356 | elementry = lookup_type_cache(elem_type, |
1357 | TYPECACHE_EQ_OPR | |
1358 | TYPECACHE_CMP_PROC | |
1359 | TYPECACHE_HASH_PROC | |
1360 | TYPECACHE_HASH_EXTENDED_PROC); |
1361 | if (OidIsValid(elementry->eq_opr)) |
1362 | typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY; |
1363 | if (OidIsValid(elementry->cmp_proc)) |
1364 | typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE; |
1365 | if (OidIsValid(elementry->hash_proc)) |
1366 | typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING; |
1367 | if (OidIsValid(elementry->hash_extended_proc)) |
1368 | typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING; |
1369 | } |
1370 | typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES; |
1371 | } |
1372 | |
1373 | /* |
1374 | * Likewise, some helper functions for composite types. |
1375 | */ |
1376 | |
1377 | static bool |
1378 | record_fields_have_equality(TypeCacheEntry *typentry) |
1379 | { |
1380 | if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES)) |
1381 | cache_record_field_properties(typentry); |
1382 | return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0; |
1383 | } |
1384 | |
1385 | static bool |
1386 | record_fields_have_compare(TypeCacheEntry *typentry) |
1387 | { |
1388 | if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES)) |
1389 | cache_record_field_properties(typentry); |
1390 | return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0; |
1391 | } |
1392 | |
1393 | static void |
1394 | cache_record_field_properties(TypeCacheEntry *typentry) |
1395 | { |
1396 | /* |
1397 | * For type RECORD, we can't really tell what will work, since we don't |
1398 | * have access here to the specific anonymous type. Just assume that |
1399 | * everything will (we may get a failure at runtime ...) |
1400 | */ |
1401 | if (typentry->type_id == RECORDOID) |
1402 | typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY | |
1403 | TCFLAGS_HAVE_FIELD_COMPARE); |
1404 | else if (typentry->typtype == TYPTYPE_COMPOSITE) |
1405 | { |
1406 | TupleDesc tupdesc; |
1407 | int newflags; |
1408 | int i; |
1409 | |
1410 | /* Fetch composite type's tupdesc if we don't have it already */ |
1411 | if (typentry->tupDesc == NULL) |
1412 | load_typcache_tupdesc(typentry); |
1413 | tupdesc = typentry->tupDesc; |
1414 | |
1415 | /* Must bump the refcount while we do additional catalog lookups */ |
1416 | IncrTupleDescRefCount(tupdesc); |
1417 | |
1418 | /* Have each property if all non-dropped fields have the property */ |
1419 | newflags = (TCFLAGS_HAVE_FIELD_EQUALITY | |
1420 | TCFLAGS_HAVE_FIELD_COMPARE); |
1421 | for (i = 0; i < tupdesc->natts; i++) |
1422 | { |
1423 | TypeCacheEntry *fieldentry; |
1424 | Form_pg_attribute attr = TupleDescAttr(tupdesc, i); |
1425 | |
1426 | if (attr->attisdropped) |
1427 | continue; |
1428 | |
1429 | fieldentry = lookup_type_cache(attr->atttypid, |
1430 | TYPECACHE_EQ_OPR | |
1431 | TYPECACHE_CMP_PROC); |
1432 | if (!OidIsValid(fieldentry->eq_opr)) |
1433 | newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY; |
1434 | if (!OidIsValid(fieldentry->cmp_proc)) |
1435 | newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE; |
1436 | |
1437 | /* We can drop out of the loop once we disprove all bits */ |
1438 | if (newflags == 0) |
1439 | break; |
1440 | } |
1441 | typentry->flags |= newflags; |
1442 | |
1443 | DecrTupleDescRefCount(tupdesc); |
1444 | } |
1445 | else if (typentry->typtype == TYPTYPE_DOMAIN) |
1446 | { |
1447 | /* If it's domain over composite, copy base type's properties */ |
1448 | TypeCacheEntry *baseentry; |
1449 | |
1450 | /* load up basetype info if we didn't already */ |
1451 | if (typentry->domainBaseType == InvalidOid) |
1452 | { |
1453 | typentry->domainBaseTypmod = -1; |
1454 | typentry->domainBaseType = |
1455 | getBaseTypeAndTypmod(typentry->type_id, |
1456 | &typentry->domainBaseTypmod); |
1457 | } |
1458 | baseentry = lookup_type_cache(typentry->domainBaseType, |
1459 | TYPECACHE_EQ_OPR | |
1460 | TYPECACHE_CMP_PROC); |
1461 | if (baseentry->typtype == TYPTYPE_COMPOSITE) |
1462 | { |
1463 | typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE; |
1464 | typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY | |
1465 | TCFLAGS_HAVE_FIELD_COMPARE); |
1466 | } |
1467 | } |
1468 | typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES; |
1469 | } |
1470 | |
1471 | /* |
1472 | * Likewise, some helper functions for range types. |
1473 | * |
1474 | * We can borrow the flag bits for array element properties to use for range |
1475 | * element properties, since those flag bits otherwise have no use in a |
1476 | * range type's typcache entry. |
1477 | */ |
1478 | |
1479 | static bool |
1480 | range_element_has_hashing(TypeCacheEntry *typentry) |
1481 | { |
1482 | if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES)) |
1483 | cache_range_element_properties(typentry); |
1484 | return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0; |
1485 | } |
1486 | |
1487 | static bool |
1488 | range_element_has_extended_hashing(TypeCacheEntry *typentry) |
1489 | { |
1490 | if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES)) |
1491 | cache_range_element_properties(typentry); |
1492 | return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0; |
1493 | } |
1494 | |
1495 | static void |
1496 | cache_range_element_properties(TypeCacheEntry *typentry) |
1497 | { |
1498 | /* load up subtype link if we didn't already */ |
1499 | if (typentry->rngelemtype == NULL && |
1500 | typentry->typtype == TYPTYPE_RANGE) |
1501 | load_rangetype_info(typentry); |
1502 | |
1503 | if (typentry->rngelemtype != NULL) |
1504 | { |
1505 | TypeCacheEntry *elementry; |
1506 | |
1507 | /* might need to calculate subtype's hash function properties */ |
1508 | elementry = lookup_type_cache(typentry->rngelemtype->type_id, |
1509 | TYPECACHE_HASH_PROC | |
1510 | TYPECACHE_HASH_EXTENDED_PROC); |
1511 | if (OidIsValid(elementry->hash_proc)) |
1512 | typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING; |
1513 | if (OidIsValid(elementry->hash_extended_proc)) |
1514 | typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING; |
1515 | } |
1516 | typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES; |
1517 | } |
1518 | |
1519 | /* |
1520 | * Make sure that RecordCacheArray and RecordIdentifierArray are large enough |
1521 | * to store 'typmod'. |
1522 | */ |
1523 | static void |
1524 | ensure_record_cache_typmod_slot_exists(int32 typmod) |
1525 | { |
1526 | if (RecordCacheArray == NULL) |
1527 | { |
1528 | RecordCacheArray = (TupleDesc *) |
1529 | MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(TupleDesc)); |
1530 | RecordIdentifierArray = (uint64 *) |
1531 | MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64)); |
1532 | RecordCacheArrayLen = 64; |
1533 | } |
1534 | |
1535 | if (typmod >= RecordCacheArrayLen) |
1536 | { |
1537 | int32 newlen = RecordCacheArrayLen * 2; |
1538 | |
1539 | while (typmod >= newlen) |
1540 | newlen *= 2; |
1541 | |
1542 | RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray, |
1543 | newlen * sizeof(TupleDesc)); |
1544 | memset(RecordCacheArray + RecordCacheArrayLen, 0, |
1545 | (newlen - RecordCacheArrayLen) * sizeof(TupleDesc)); |
1546 | RecordIdentifierArray = (uint64 *) repalloc(RecordIdentifierArray, |
1547 | newlen * sizeof(uint64)); |
1548 | memset(RecordIdentifierArray + RecordCacheArrayLen, 0, |
1549 | (newlen - RecordCacheArrayLen) * sizeof(uint64)); |
1550 | RecordCacheArrayLen = newlen; |
1551 | } |
1552 | } |
1553 | |
1554 | /* |
1555 | * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype |
1556 | * |
1557 | * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc |
1558 | * hasn't had its refcount bumped. |
1559 | */ |
1560 | static TupleDesc |
1561 | lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError) |
1562 | { |
1563 | if (type_id != RECORDOID) |
1564 | { |
1565 | /* |
1566 | * It's a named composite type, so use the regular typcache. |
1567 | */ |
1568 | TypeCacheEntry *typentry; |
1569 | |
1570 | typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC); |
1571 | if (typentry->tupDesc == NULL && !noError) |
1572 | ereport(ERROR, |
1573 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
1574 | errmsg("type %s is not composite",
1575 | format_type_be(type_id)))); |
1576 | return typentry->tupDesc; |
1577 | } |
1578 | else |
1579 | { |
1580 | /* |
1581 | * It's a transient record type, so look in our record-type table. |
1582 | */ |
1583 | if (typmod >= 0) |
1584 | { |
1585 | /* Is it already in our local cache? */
1586 | if (typmod < RecordCacheArrayLen && |
1587 | RecordCacheArray[typmod] != NULL) |
1588 | return RecordCacheArray[typmod]; |
1589 | |
1590 | /* Are we attached to a shared record typmod registry? */ |
1591 | if (CurrentSession->shared_typmod_registry != NULL) |
1592 | { |
1593 | SharedTypmodTableEntry *entry; |
1594 | |
1595 | /* Try to find it in the shared typmod index. */ |
1596 | entry = dshash_find(CurrentSession->shared_typmod_table, |
1597 | &typmod, false); |
1598 | if (entry != NULL) |
1599 | { |
1600 | TupleDesc tupdesc; |
1601 | |
1602 | tupdesc = (TupleDesc) |
1603 | dsa_get_address(CurrentSession->area, |
1604 | entry->shared_tupdesc); |
1605 | Assert(typmod == tupdesc->tdtypmod); |
1606 | |
1607 | /* We may need to extend the local RecordCacheArray. */ |
1608 | ensure_record_cache_typmod_slot_exists(typmod); |
1609 | |
1610 | /* |
1611 | * Our local array can now point directly to the TupleDesc |
1612 | * in shared memory, which is non-reference-counted. |
1613 | */ |
1614 | RecordCacheArray[typmod] = tupdesc; |
1615 | Assert(tupdesc->tdrefcount == -1); |
1616 | |
1617 | /* |
1618 | * We don't share tupdesc identifiers across processes, so |
1619 | * assign one locally. |
1620 | */ |
1621 | RecordIdentifierArray[typmod] = ++tupledesc_id_counter; |
1622 | |
1623 | dshash_release_lock(CurrentSession->shared_typmod_table, |
1624 | entry); |
1625 | |
1626 | return RecordCacheArray[typmod]; |
1627 | } |
1628 | } |
1629 | } |
1630 | |
1631 | if (!noError) |
1632 | ereport(ERROR, |
1633 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
1634 | errmsg("record type has not been registered")));
1635 | return NULL; |
1636 | } |
1637 | } |
1638 | |
1639 | /* |
1640 | * lookup_rowtype_tupdesc |
1641 | * |
1642 | * Given a typeid/typmod that should describe a known composite type, |
1643 | * return the tuple descriptor for the type. Will ereport on failure. |
1644 | * (Use ereport because this is reachable with user-specified OIDs, |
1645 | * for example from record_in().) |
1646 | * |
1647 | * Note: on success, we increment the refcount of the returned TupleDesc, |
1648 | * and log the reference in CurrentResourceOwner. Caller should call |
1649 | * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc. |
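 *
 * A minimal caller-side sketch (illustrative only; 'typid' and 'typmod' are
 * whatever values the caller has at hand, not names defined here):
 *
 *		TupleDesc	tupdesc = lookup_rowtype_tupdesc(typid, typmod);
 *
 *		for (int i = 0; i < tupdesc->natts; i++)
 *			... examine TupleDescAttr(tupdesc, i) ...
 *		ReleaseTupleDesc(tupdesc);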
1650 | */ |
1651 | TupleDesc |
1652 | lookup_rowtype_tupdesc(Oid type_id, int32 typmod) |
1653 | { |
1654 | TupleDesc tupDesc; |
1655 | |
1656 | tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false); |
1657 | PinTupleDesc(tupDesc); |
1658 | return tupDesc; |
1659 | } |
1660 | |
1661 | /* |
1662 | * lookup_rowtype_tupdesc_noerror |
1663 | * |
1664 | * As above, but if the type is not a known composite type and noError |
1665 | * is true, returns NULL instead of ereport'ing. (Note that if a bogus |
1666 | * type_id is passed, you'll get an ereport anyway.) |
1667 | */ |
1668 | TupleDesc |
1669 | lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError) |
1670 | { |
1671 | TupleDesc tupDesc; |
1672 | |
1673 | tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError); |
1674 | if (tupDesc != NULL) |
1675 | PinTupleDesc(tupDesc); |
1676 | return tupDesc; |
1677 | } |
1678 | |
1679 | /* |
1680 | * lookup_rowtype_tupdesc_copy |
1681 | * |
1682 | * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been |
1683 | * copied into the CurrentMemoryContext and is not reference-counted. |
1684 | */ |
1685 | TupleDesc |
1686 | lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod) |
1687 | { |
1688 | TupleDesc tmp; |
1689 | |
1690 | tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false); |
1691 | return CreateTupleDescCopyConstr(tmp); |
1692 | } |
1693 | |
1694 | /* |
1695 | * lookup_rowtype_tupdesc_domain |
1696 | * |
1697 | * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be |
1698 | * a domain over a named composite type; so this is effectively equivalent to |
1699 | * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError) |
1700 | * except for being a tad faster. |
1701 | * |
1702 | * Note: the reason we don't fold the look-through-domain behavior into plain |
1703 | * lookup_rowtype_tupdesc() is that we want callers to know they might be |
1704 | * dealing with a domain. Otherwise they might construct a tuple that should |
1705 | * be of the domain type, but not apply domain constraints. |
1706 | */ |
1707 | TupleDesc |
1708 | lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError) |
1709 | { |
1710 | TupleDesc tupDesc; |
1711 | |
1712 | if (type_id != RECORDOID) |
1713 | { |
1714 | /* |
1715 | * Check for domain or named composite type. We might as well load |
1716 | * whichever data is needed. |
1717 | */ |
1718 | TypeCacheEntry *typentry; |
1719 | |
1720 | typentry = lookup_type_cache(type_id, |
1721 | TYPECACHE_TUPDESC | |
1722 | TYPECACHE_DOMAIN_BASE_INFO); |
1723 | if (typentry->typtype == TYPTYPE_DOMAIN) |
1724 | return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType, |
1725 | typentry->domainBaseTypmod, |
1726 | noError); |
1727 | if (typentry->tupDesc == NULL && !noError) |
1728 | ereport(ERROR, |
1729 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
1730 | errmsg("type %s is not composite",
1731 | format_type_be(type_id)))); |
1732 | tupDesc = typentry->tupDesc; |
1733 | } |
1734 | else |
1735 | tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError); |
1736 | if (tupDesc != NULL) |
1737 | PinTupleDesc(tupDesc); |
1738 | return tupDesc; |
1739 | } |
1740 | |
1741 | /* |
1742 | * Hash function for the hash table of RecordCacheEntry. |
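 *
 * The hash key is nominally just the TupleDesc pointer (keysize is
 * sizeof(TupleDesc) in assign_record_type_typmod), but this function and
 * record_type_typmod_compare both look through the pointer at the
 * descriptor's contents, so lookups match on structural equality rather than
 * pointer identity.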
1743 | */ |
1744 | static uint32 |
1745 | record_type_typmod_hash(const void *data, size_t size) |
1746 | { |
1747 | RecordCacheEntry *entry = (RecordCacheEntry *) data; |
1748 | |
1749 | return hashTupleDesc(entry->tupdesc); |
1750 | } |
1751 | |
1752 | /* |
1753 | * Match function for the hash table of RecordCacheEntry. |
1754 | */ |
1755 | static int |
1756 | record_type_typmod_compare(const void *a, const void *b, size_t size) |
1757 | { |
1758 | RecordCacheEntry *left = (RecordCacheEntry *) a; |
1759 | RecordCacheEntry *right = (RecordCacheEntry *) b; |
1760 | |
1761 | return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1; |
1762 | } |
1763 | |
1764 | /* |
1765 | * assign_record_type_typmod |
1766 | * |
1767 | * Given a tuple descriptor for a RECORD type, find or create a cache entry |
1768 | * for the type, and set the tupdesc's tdtypmod field to a value that will |
1769 | * identify this cache entry to lookup_rowtype_tupdesc. |
1770 | */ |
1771 | void |
1772 | assign_record_type_typmod(TupleDesc tupDesc) |
1773 | { |
1774 | RecordCacheEntry *recentry; |
1775 | TupleDesc entDesc; |
1776 | bool found; |
1777 | MemoryContext oldcxt; |
1778 | |
1779 | Assert(tupDesc->tdtypeid == RECORDOID); |
1780 | |
1781 | if (RecordCacheHash == NULL) |
1782 | { |
1783 | /* First time through: initialize the hash table */ |
1784 | HASHCTL ctl; |
1785 | |
1786 | MemSet(&ctl, 0, sizeof(ctl)); |
1787 | ctl.keysize = sizeof(TupleDesc); /* just the pointer */ |
1788 | ctl.entrysize = sizeof(RecordCacheEntry); |
1789 | ctl.hash = record_type_typmod_hash; |
1790 | ctl.match = record_type_typmod_compare; |
1791 | RecordCacheHash = hash_create("Record information cache", 64,
1792 | &ctl, |
1793 | HASH_ELEM | HASH_FUNCTION | HASH_COMPARE); |
1794 | |
1795 | /* Also make sure CacheMemoryContext exists */ |
1796 | if (!CacheMemoryContext) |
1797 | CreateCacheMemoryContext(); |
1798 | } |
1799 | |
1800 | /* Find or create a hashtable entry for this tuple descriptor */ |
1801 | recentry = (RecordCacheEntry *) hash_search(RecordCacheHash, |
1802 | (void *) &tupDesc, |
1803 | HASH_ENTER, &found); |
1804 | if (found && recentry->tupdesc != NULL) |
1805 | { |
1806 | tupDesc->tdtypmod = recentry->tupdesc->tdtypmod; |
1807 | return; |
1808 | } |
1809 | |
1810 | /* Not present, so need to manufacture an entry */ |
1811 | recentry->tupdesc = NULL; |
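
/*
 * Starting with a NULL tupdesc means that if we error out partway through,
 * the next call will see an entry whose tupdesc is still NULL and will redo
 * the work (see the "found" test above) rather than trust a half-built entry.
 */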
1812 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
1813 | |
1814 | /* Look in the SharedRecordTypmodRegistry, if attached */ |
1815 | entDesc = find_or_make_matching_shared_tupledesc(tupDesc); |
1816 | if (entDesc == NULL) |
1817 | { |
1818 | /* Reference-counted local cache only. */ |
1819 | entDesc = CreateTupleDescCopy(tupDesc); |
1820 | entDesc->tdrefcount = 1; |
1821 | entDesc->tdtypmod = NextRecordTypmod++; |
1822 | } |
1823 | ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod); |
1824 | RecordCacheArray[entDesc->tdtypmod] = entDesc; |
1825 | recentry->tupdesc = entDesc; |
1826 | |
1827 | /* Assign a unique tupdesc identifier, too. */ |
1828 | RecordIdentifierArray[entDesc->tdtypmod] = ++tupledesc_id_counter; |
1829 | |
1830 | /* Update the caller's tuple descriptor. */ |
1831 | tupDesc->tdtypmod = entDesc->tdtypmod; |
1832 | |
1833 | MemoryContextSwitchTo(oldcxt); |
1834 | } |
1835 | |
1836 | /* |
1837 | * assign_record_type_identifier |
1838 | * |
1839 | * Get an identifier, which will be unique over the lifespan of this backend |
1840 | * process, for the current tuple descriptor of the specified composite type. |
1841 | * For named composite types, the value is guaranteed to change if the type's |
1842 | * definition does. For registered RECORD types, the value will not change |
1843 | * once assigned, since the registered type won't either. If an anonymous |
1844 | * RECORD type is specified, we return a new identifier on each call. |
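 *
 * A minimal caller-side sketch (illustrative only; 'mystate->saved_id' is a
 * hypothetical field in the caller's own cached state):
 *
 *		uint64		cur_id = assign_record_type_identifier(typid, typmod);
 *
 *		if (cur_id != mystate->saved_id)
 *		{
 *			... rebuild whatever was derived from the tuple descriptor ...
 *			mystate->saved_id = cur_id;
 *		}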
1845 | */ |
1846 | uint64 |
1847 | assign_record_type_identifier(Oid type_id, int32 typmod) |
1848 | { |
1849 | if (type_id != RECORDOID) |
1850 | { |
1851 | /* |
1852 | * It's a named composite type, so use the regular typcache. |
1853 | */ |
1854 | TypeCacheEntry *typentry; |
1855 | |
1856 | typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC); |
1857 | if (typentry->tupDesc == NULL) |
1858 | ereport(ERROR, |
1859 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
1860 | errmsg("type %s is not composite",
1861 | format_type_be(type_id)))); |
1862 | Assert(typentry->tupDesc_identifier != 0); |
1863 | return typentry->tupDesc_identifier; |
1864 | } |
1865 | else |
1866 | { |
1867 | /* |
1868 | * It's a transient record type, so look in our record-type table. |
1869 | */ |
1870 | if (typmod >= 0 && typmod < RecordCacheArrayLen && |
1871 | RecordCacheArray[typmod] != NULL) |
1872 | { |
1873 | Assert(RecordIdentifierArray[typmod] != 0); |
1874 | return RecordIdentifierArray[typmod]; |
1875 | } |
1876 | |
1877 | /* For anonymous or unrecognized record type, generate a new ID */ |
1878 | return ++tupledesc_id_counter; |
1879 | } |
1880 | } |
1881 | |
1882 | /* |
1883 | * Return the amount of shmem required to hold a SharedRecordTypmodRegistry. |
1884 | * This exists only to avoid exposing private innards of |
1885 | * SharedRecordTypmodRegistry in a header. |
1886 | */ |
1887 | size_t |
1888 | SharedRecordTypmodRegistryEstimate(void) |
1889 | { |
1890 | return sizeof(SharedRecordTypmodRegistry); |
1891 | } |
1892 | |
1893 | /* |
1894 | * Initialize 'registry' in a pre-existing shared memory region, which must be |
1895 | * maximally aligned and have space for SharedRecordTypmodRegistryEstimate() |
1896 | * bytes. |
1897 | * |
1898 | * 'area' will be used to allocate shared memory space as required for the |
1899 | * typmod registration. The current process, expected to be a leader process
1900 | * in a parallel query, will be attached automatically and its current record |
1901 | * types will be loaded into *registry. While attached, all calls to |
1902 | * assign_record_type_typmod will use the shared registry. Worker backends |
1903 | * will need to attach explicitly. |
1904 | * |
1905 | * Note that this function takes 'area' and 'segment' as arguments rather than |
1906 | * accessing them via CurrentSession, because they aren't installed there |
1907 | * until after this function runs. |
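 *
 * A rough sketch of the expected call sequence, hedged because the real
 * plumbing is driven by the session/DSM setup code rather than written out
 * this way:
 *
 *	leader:	size = SharedRecordTypmodRegistryEstimate();
 *			... reserve 'size' maximally-aligned bytes in a DSM segment and
 *			set up a dsa_area for it ...
 *			SharedRecordTypmodRegistryInit(registry, segment, area);
 *
 *	worker:	SharedRecordTypmodRegistryAttach(registry);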
1908 | */ |
1909 | void |
1910 | SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, |
1911 | dsm_segment *segment, |
1912 | dsa_area *area) |
1913 | { |
1914 | MemoryContext old_context; |
1915 | dshash_table *record_table; |
1916 | dshash_table *typmod_table; |
1917 | int32 typmod; |
1918 | |
1919 | Assert(!IsParallelWorker()); |
1920 | |
1921 | /* We can't already be attached to a shared registry. */ |
1922 | Assert(CurrentSession->shared_typmod_registry == NULL); |
1923 | Assert(CurrentSession->shared_record_table == NULL); |
1924 | Assert(CurrentSession->shared_typmod_table == NULL); |
1925 | |
1926 | old_context = MemoryContextSwitchTo(TopMemoryContext); |
1927 | |
1928 | /* Create the hash table of tuple descriptors indexed by themselves. */ |
1929 | record_table = dshash_create(area, &srtr_record_table_params, area); |
1930 | |
1931 | /* Create the hash table of tuple descriptors indexed by typmod. */ |
1932 | typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL); |
1933 | |
1934 | MemoryContextSwitchTo(old_context); |
1935 | |
1936 | /* Initialize the SharedRecordTypmodRegistry. */ |
1937 | registry->record_table_handle = dshash_get_hash_table_handle(record_table); |
1938 | registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table); |
1939 | pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
1940 | |
1941 | /* |
1942 | * Copy all entries from this backend's private registry into the shared |
1943 | * registry. |
1944 | */ |
1945 | for (typmod = 0; typmod < NextRecordTypmod; ++typmod) |
1946 | { |
1947 | SharedTypmodTableEntry *typmod_table_entry; |
1948 | SharedRecordTableEntry *record_table_entry; |
1949 | SharedRecordTableKey record_table_key; |
1950 | dsa_pointer shared_dp; |
1951 | TupleDesc tupdesc; |
1952 | bool found; |
1953 | |
1954 | tupdesc = RecordCacheArray[typmod]; |
1955 | if (tupdesc == NULL) |
1956 | continue; |
1957 | |
1958 | /* Copy the TupleDesc into shared memory. */ |
1959 | shared_dp = share_tupledesc(area, tupdesc, typmod); |
1960 | |
1961 | /* Insert into the typmod table. */ |
1962 | typmod_table_entry = dshash_find_or_insert(typmod_table, |
1963 | &tupdesc->tdtypmod, |
1964 | &found); |
1965 | if (found) |
1966 | elog(ERROR, "cannot create duplicate shared record typmod");
1967 | typmod_table_entry->typmod = tupdesc->tdtypmod; |
1968 | typmod_table_entry->shared_tupdesc = shared_dp; |
1969 | dshash_release_lock(typmod_table, typmod_table_entry); |
1970 | |
1971 | /* Insert into the record table. */ |
1972 | record_table_key.shared = false; |
1973 | record_table_key.u.local_tupdesc = tupdesc; |
1974 | record_table_entry = dshash_find_or_insert(record_table, |
1975 | &record_table_key, |
1976 | &found); |
1977 | if (!found) |
1978 | { |
1979 | record_table_entry->key.shared = true; |
1980 | record_table_entry->key.u.shared_tupdesc = shared_dp; |
1981 | } |
1982 | dshash_release_lock(record_table, record_table_entry); |
1983 | } |
1984 | |
1985 | /* |
1986 | * Set up the global state that will tell assign_record_type_typmod and |
1987 | * lookup_rowtype_tupdesc_internal about the shared registry. |
1988 | */ |
1989 | CurrentSession->shared_record_table = record_table; |
1990 | CurrentSession->shared_typmod_table = typmod_table; |
1991 | CurrentSession->shared_typmod_registry = registry; |
1992 | |
1993 | /* |
1994 | * We install a detach hook in the leader, but only to handle cleanup on |
1995 | * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins |
1996 | * the memory, the leader process will use a shared registry until it |
1997 | * exits. |
1998 | */ |
1999 | on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0); |
2000 | } |
2001 | |
2002 | /* |
2003 | * Attach to 'registry', which must have been initialized already by another |
2004 | * backend. Future calls to assign_record_type_typmod and |
2005 | * lookup_rowtype_tupdesc_internal will use the shared registry until the |
2006 | * current session is detached. |
2007 | */ |
2008 | void |
2009 | SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry) |
2010 | { |
2011 | MemoryContext old_context; |
2012 | dshash_table *record_table; |
2013 | dshash_table *typmod_table; |
2014 | |
2015 | Assert(IsParallelWorker()); |
2016 | |
2017 | /* We can't already be attached to a shared registry. */ |
2018 | Assert(CurrentSession != NULL); |
2019 | Assert(CurrentSession->segment != NULL); |
2020 | Assert(CurrentSession->area != NULL); |
2021 | Assert(CurrentSession->shared_typmod_registry == NULL); |
2022 | Assert(CurrentSession->shared_record_table == NULL); |
2023 | Assert(CurrentSession->shared_typmod_table == NULL); |
2024 | |
2025 | /* |
2026 | * We can't already have typmods in our local cache, because they'd clash |
2027 | * with those imported by SharedRecordTypmodRegistryInit. This should be |
2028 | * a freshly started parallel worker. If we ever support worker |
2029 | * recycling, a worker would need to zap its local cache in between |
2030 | * servicing different queries, in order to be able to call this and |
2031 | * synchronize typmods with a new leader; but that's problematic because |
2032 | * we can't be very sure that record-typmod-related state hasn't escaped |
2033 | * to anywhere else in the process. |
2034 | */ |
2035 | Assert(NextRecordTypmod == 0); |
2036 | |
2037 | old_context = MemoryContextSwitchTo(TopMemoryContext); |
2038 | |
2039 | /* Attach to the two hash tables. */ |
2040 | record_table = dshash_attach(CurrentSession->area, |
2041 | &srtr_record_table_params, |
2042 | registry->record_table_handle, |
2043 | CurrentSession->area); |
2044 | typmod_table = dshash_attach(CurrentSession->area, |
2045 | &srtr_typmod_table_params, |
2046 | registry->typmod_table_handle, |
2047 | NULL); |
2048 | |
2049 | MemoryContextSwitchTo(old_context); |
2050 | |
2051 | /* |
2052 | * Set up detach hook to run at worker exit. Currently this is the same |
2053 | * as the leader's detach hook, but in future they might need to be |
2054 | * different. |
2055 | */ |
2056 | on_dsm_detach(CurrentSession->segment, |
2057 | shared_record_typmod_registry_detach, |
2058 | PointerGetDatum(registry)); |
2059 | |
2060 | /* |
2061 | * Set up the session state that will tell assign_record_type_typmod and |
2062 | * lookup_rowtype_tupdesc_internal about the shared registry. |
2063 | */ |
2064 | CurrentSession->shared_typmod_registry = registry; |
2065 | CurrentSession->shared_record_table = record_table; |
2066 | CurrentSession->shared_typmod_table = typmod_table; |
2067 | } |
2068 | |
2069 | /* |
2070 | * TypeCacheRelCallback |
2071 | * Relcache inval callback function |
2072 | * |
2073 | * Delete the cached tuple descriptor (if any) for the given rel's composite |
2074 | * type, or for all composite types if relid == InvalidOid. Also reset |
2075 | * whatever info we have cached about the composite type's comparability. |
2076 | * |
2077 | * This is called when a relcache invalidation event occurs for the given |
2078 | * relid. We must scan the whole typcache hash since we don't know the |
2079 | * type OID corresponding to the relid. We could do a direct search if this |
2080 | * were a syscache-flush callback on pg_type, but then we would need all |
2081 | * ALTER-TABLE-like commands that could modify a rowtype to issue syscache |
2082 | * invals against the rel's pg_type OID. The extra SI signaling could very |
2083 | * well cost more than we'd save, since in most usages there are not very |
2084 | * many entries in a backend's typcache. The risk of bugs-of-omission seems |
2085 | * high, too. |
2086 | * |
2087 | * Another possibility, with only localized impact, is to maintain a second |
2088 | * hashtable that indexes composite-type typcache entries by their typrelid. |
2089 | * But it's still not clear it's worth the trouble. |
2090 | */ |
2091 | static void |
2092 | TypeCacheRelCallback(Datum arg, Oid relid) |
2093 | { |
2094 | HASH_SEQ_STATUS status; |
2095 | TypeCacheEntry *typentry; |
2096 | |
2097 | /* TypeCacheHash must exist, else this callback wouldn't be registered */ |
2098 | hash_seq_init(&status, TypeCacheHash); |
2099 | while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL) |
2100 | { |
2101 | if (typentry->typtype == TYPTYPE_COMPOSITE) |
2102 | { |
2103 | /* Skip if no match, unless we're zapping all composite types */ |
2104 | if (relid != typentry->typrelid && relid != InvalidOid) |
2105 | continue; |
2106 | |
2107 | /* Delete tupdesc if we have it */ |
2108 | if (typentry->tupDesc != NULL) |
2109 | { |
2110 | /* |
2111 | * Release our refcount, and free the tupdesc if none remain. |
2112 | * (Can't use DecrTupleDescRefCount because this reference is |
2113 | * not logged in current resource owner.) |
2114 | */ |
2115 | Assert(typentry->tupDesc->tdrefcount > 0); |
2116 | if (--typentry->tupDesc->tdrefcount == 0) |
2117 | FreeTupleDesc(typentry->tupDesc); |
2118 | typentry->tupDesc = NULL; |
2119 | |
2120 | /* |
2121 | * Also clear tupDesc_identifier, so that anything watching |
2122 | * that will realize that the tupdesc has possibly changed. |
2123 | * (Alternatively, we could specify that to detect possible |
2124 | * tupdesc change, one must check for tupDesc != NULL as well |
2125 | * as tupDesc_identifier being the same as what was previously |
2126 | * seen. That seems error-prone.) |
2127 | */ |
2128 | typentry->tupDesc_identifier = 0; |
2129 | } |
2130 | |
2131 | /* Reset equality/comparison/hashing validity information */ |
2132 | typentry->flags = 0; |
2133 | } |
2134 | else if (typentry->typtype == TYPTYPE_DOMAIN) |
2135 | { |
2136 | /* |
2137 | * If it's domain over composite, reset flags. (We don't bother |
2138 | * trying to determine whether the specific base type needs a |
2139 | * reset.) Note that if we haven't determined whether the base |
2140 | * type is composite, we don't need to reset anything. |
2141 | */ |
2142 | if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE) |
2143 | typentry->flags = 0; |
2144 | } |
2145 | } |
2146 | } |
2147 | |
2148 | /* |
2149 | * TypeCacheOpcCallback |
2150 | * Syscache inval callback function |
2151 | * |
2152 | * This is called when a syscache invalidation event occurs for any pg_opclass |
2153 | * row. In principle we could probably just invalidate data dependent on the |
2154 | * particular opclass, but since updates on pg_opclass are rare in production |
2155 | * it doesn't seem worth a lot of complication: we just mark all cached data |
2156 | * invalid. |
2157 | * |
2158 | * Note that we don't bother watching for updates on pg_amop or pg_amproc. |
2159 | * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION |
2160 | * is not allowed to be used to add/drop the primary operators and functions |
2161 | * of an opclass, only cross-type members of a family; and the latter sorts |
2162 | * of members are not going to get cached here. |
2163 | */ |
2164 | static void |
2165 | TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue) |
2166 | { |
2167 | HASH_SEQ_STATUS status; |
2168 | TypeCacheEntry *typentry; |
2169 | |
2170 | /* TypeCacheHash must exist, else this callback wouldn't be registered */ |
2171 | hash_seq_init(&status, TypeCacheHash); |
2172 | while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL) |
2173 | { |
2174 | /* Reset equality/comparison/hashing validity information */ |
2175 | typentry->flags = 0; |
2176 | } |
2177 | } |
2178 | |
2179 | /* |
2180 | * TypeCacheConstrCallback |
2181 | * Syscache inval callback function |
2182 | * |
2183 | * This is called when a syscache invalidation event occurs for any |
2184 | * pg_constraint or pg_type row. We flush information about domain |
2185 | * constraints when this happens. |
2186 | * |
2187 | * It's slightly annoying that we can't tell whether the inval event was for a |
2188 | * domain constraint/type record or not; there's usually more update traffic |
2189 | * for table constraints/types than domain constraints, so we'll do a lot of |
2190 | * useless flushes. Still, this is better than the old no-caching-at-all |
2191 | * approach to domain constraints. |
2192 | */ |
2193 | static void |
2194 | TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue) |
2195 | { |
2196 | TypeCacheEntry *typentry; |
2197 | |
2198 | /* |
2199 | * Because this is called very frequently, and typically very few of the |
2200 | * typcache entries are for domains, we don't use hash_seq_search here. |
2201 | * Instead we thread all the domain-type entries together so that we can |
2202 | * visit them cheaply. |
2203 | */ |
2204 | for (typentry = firstDomainTypeEntry; |
2205 | typentry != NULL; |
2206 | typentry = typentry->nextDomain) |
2207 | { |
2208 | /* Reset domain constraint validity information */ |
2209 | typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS; |
2210 | } |
2211 | } |
2212 | |
2213 | |
2214 | /* |
2215 | * Check if given OID is part of the subset that's sortable by comparisons |
2216 | */ |
2217 | static inline bool |
2218 | enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg) |
2219 | { |
2220 | Oid offset; |
2221 | |
2222 | if (arg < enumdata->bitmap_base) |
2223 | return false; |
2224 | offset = arg - enumdata->bitmap_base; |
2225 | if (offset > (Oid) INT_MAX) |
2226 | return false; |
2227 | return bms_is_member((int) offset, enumdata->sorted_values); |
2228 | } |
2229 | |
2230 | |
2231 | /* |
2232 | * compare_values_of_enum |
2233 | * Compare two members of an enum type. |
2234 | * Return <0, 0, or >0 according as arg1 <, =, or > arg2. |
2235 | * |
2236 | * Note: currently, the enumData cache is refreshed only if we are asked |
2237 | * to compare an enum value that is not already in the cache. This is okay |
2238 | * because there is no support for re-ordering existing values, so comparisons |
2239 | * of previously cached values will return the right answer even if other |
2240 | * values have been added since we last loaded the cache. |
2241 | * |
2242 | * Note: the enum logic has a special-case rule about even-numbered versus |
2243 | * odd-numbered OIDs, but we take no account of that rule here; this |
2244 | * routine shouldn't even get called when that rule applies. |
2245 | */ |
2246 | int |
2247 | compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2) |
2248 | { |
2249 | TypeCacheEnumData *enumdata; |
2250 | EnumItem *item1; |
2251 | EnumItem *item2; |
2252 | |
2253 | /* |
2254 | * Equal OIDs are certainly equal --- this case was probably handled by |
2255 | * our caller, but we may as well check. |
2256 | */ |
2257 | if (arg1 == arg2) |
2258 | return 0; |
2259 | |
2260 | /* Load up the cache if first time through */ |
2261 | if (tcache->enumData == NULL) |
2262 | load_enum_cache_data(tcache); |
2263 | enumdata = tcache->enumData; |
2264 | |
2265 | /* |
2266 | * If both OIDs are known-sorted, we can just compare them directly. |
2267 | */ |
2268 | if (enum_known_sorted(enumdata, arg1) && |
2269 | enum_known_sorted(enumdata, arg2)) |
2270 | { |
2271 | if (arg1 < arg2) |
2272 | return -1; |
2273 | else |
2274 | return 1; |
2275 | } |
2276 | |
2277 | /* |
2278 | * Slow path: we have to identify their actual sort-order positions. |
2279 | */ |
2280 | item1 = find_enumitem(enumdata, arg1); |
2281 | item2 = find_enumitem(enumdata, arg2); |
2282 | |
2283 | if (item1 == NULL || item2 == NULL) |
2284 | { |
2285 | /* |
2286 | * We couldn't find one or both values. That means the enum has |
2287 | * changed under us, so re-initialize the cache and try again. We |
2288 | * don't bother retrying the known-sorted case in this path. |
2289 | */ |
2290 | load_enum_cache_data(tcache); |
2291 | enumdata = tcache->enumData; |
2292 | |
2293 | item1 = find_enumitem(enumdata, arg1); |
2294 | item2 = find_enumitem(enumdata, arg2); |
2295 | |
2296 | /* |
2297 | * If we still can't find the values, complain: we must have corrupt |
2298 | * data. |
2299 | */ |
2300 | if (item1 == NULL) |
2301 | elog(ERROR, "enum value %u not found in cache for enum %s",
2302 | arg1, format_type_be(tcache->type_id)); |
2303 | if (item2 == NULL) |
2304 | elog(ERROR, "enum value %u not found in cache for enum %s",
2305 | arg2, format_type_be(tcache->type_id)); |
2306 | } |
2307 | |
2308 | if (item1->sort_order < item2->sort_order) |
2309 | return -1; |
2310 | else if (item1->sort_order > item2->sort_order) |
2311 | return 1; |
2312 | else |
2313 | return 0; |
2314 | } |
2315 | |
2316 | /* |
2317 | * Load (or re-load) the enumData member of the typcache entry. |
2318 | */ |
2319 | static void |
2320 | load_enum_cache_data(TypeCacheEntry *tcache) |
2321 | { |
2322 | TypeCacheEnumData *enumdata; |
2323 | Relation enum_rel; |
2324 | SysScanDesc enum_scan; |
2325 | HeapTuple enum_tuple; |
2326 | ScanKeyData skey; |
2327 | EnumItem *items; |
2328 | int numitems; |
2329 | int maxitems; |
2330 | Oid bitmap_base; |
2331 | Bitmapset *bitmap; |
2332 | MemoryContext oldcxt; |
2333 | int bm_size, |
2334 | start_pos; |
2335 | |
2336 | /* Check that this is actually an enum */ |
2337 | if (tcache->typtype != TYPTYPE_ENUM) |
2338 | ereport(ERROR, |
2339 | (errcode(ERRCODE_WRONG_OBJECT_TYPE), |
2340 | errmsg("%s is not an enum",
2341 | format_type_be(tcache->type_id)))); |
2342 | |
2343 | /* |
2344 | * Read all the information for members of the enum type. We collect the |
2345 | * info in working memory in the caller's context, and then transfer it to |
2346 | * permanent memory in CacheMemoryContext. This minimizes the risk of |
2347 | * leaking memory from CacheMemoryContext in the event of an error partway |
2348 | * through. |
2349 | */ |
2350 | maxitems = 64; |
2351 | items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems); |
2352 | numitems = 0; |
2353 | |
2354 | /* Scan pg_enum for the members of the target enum type. */ |
2355 | ScanKeyInit(&skey, |
2356 | Anum_pg_enum_enumtypid, |
2357 | BTEqualStrategyNumber, F_OIDEQ, |
2358 | ObjectIdGetDatum(tcache->type_id)); |
2359 | |
2360 | enum_rel = table_open(EnumRelationId, AccessShareLock); |
2361 | enum_scan = systable_beginscan(enum_rel, |
2362 | EnumTypIdLabelIndexId, |
2363 | true, NULL, |
2364 | 1, &skey); |
2365 | |
2366 | while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan))) |
2367 | { |
2368 | Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple); |
2369 | |
2370 | if (numitems >= maxitems) |
2371 | { |
2372 | maxitems *= 2; |
2373 | items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems); |
2374 | } |
2375 | items[numitems].enum_oid = en->oid; |
2376 | items[numitems].sort_order = en->enumsortorder; |
2377 | numitems++; |
2378 | } |
2379 | |
2380 | systable_endscan(enum_scan); |
2381 | table_close(enum_rel, AccessShareLock); |
2382 | |
2383 | /* Sort the items into OID order */ |
2384 | qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp); |
2385 | |
2386 | /* |
2387 | * Here, we create a bitmap listing a subset of the enum's OIDs that are |
2388 | * known to be in order and can thus be compared with just OID comparison. |
2389 | * |
2390 | * The point of this is that the enum's initial OIDs were certainly in |
2391 | * order, so there is some subset that can be compared via OID comparison; |
2392 | * and we'd rather not do binary searches unnecessarily. |
2393 | * |
2394 | * This is somewhat heuristic, and might identify a subset of OIDs that |
2395 | * isn't exactly what the type started with. That's okay as long as the |
2396 | * subset is correctly sorted. |
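 *
 * For example (purely illustrative values): if the members in OID order have
 * sort positions 1, 2, 3, 1.5 --- say the last one was added later with
 * ALTER TYPE ... ADD VALUE ... BEFORE --- then starting from the first OID we
 * can keep offsets {0, 1, 2} as an in-order subset of size 3, while the
 * out-of-order member is left to the slower find_enumitem() path in
 * compare_values_of_enum().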
2397 | */ |
2398 | bitmap_base = InvalidOid; |
2399 | bitmap = NULL; |
2400 | bm_size = 1; /* only save sets of at least 2 OIDs */ |
2401 | |
2402 | for (start_pos = 0; start_pos < numitems - 1; start_pos++) |
2403 | { |
2404 | /* |
2405 | * Identify longest sorted subsequence starting at start_pos |
2406 | */ |
2407 | Bitmapset *this_bitmap = bms_make_singleton(0); |
2408 | int this_bm_size = 1; |
2409 | Oid start_oid = items[start_pos].enum_oid; |
2410 | float4 prev_order = items[start_pos].sort_order; |
2411 | int i; |
2412 | |
2413 | for (i = start_pos + 1; i < numitems; i++) |
2414 | { |
2415 | Oid offset; |
2416 | |
2417 | offset = items[i].enum_oid - start_oid; |
2418 | /* quit if bitmap would be too large; cutoff is arbitrary */ |
2419 | if (offset >= 8192) |
2420 | break; |
2421 | /* include the item if it's in-order */ |
2422 | if (items[i].sort_order > prev_order) |
2423 | { |
2424 | prev_order = items[i].sort_order; |
2425 | this_bitmap = bms_add_member(this_bitmap, (int) offset); |
2426 | this_bm_size++; |
2427 | } |
2428 | } |
2429 | |
2430 | /* Remember it if larger than previous best */ |
2431 | if (this_bm_size > bm_size) |
2432 | { |
2433 | bms_free(bitmap); |
2434 | bitmap_base = start_oid; |
2435 | bitmap = this_bitmap; |
2436 | bm_size = this_bm_size; |
2437 | } |
2438 | else |
2439 | bms_free(this_bitmap); |
2440 | |
2441 | /* |
2442 | * Done if it's not possible to find a longer sequence in the rest of |
2443 | * the list. In typical cases this will happen on the first |
2444 | * iteration, which is why we create the bitmaps on the fly instead of |
2445 | * doing a second pass over the list. |
2446 | */ |
2447 | if (bm_size >= (numitems - start_pos - 1)) |
2448 | break; |
2449 | } |
2450 | |
2451 | /* OK, copy the data into CacheMemoryContext */ |
2452 | oldcxt = MemoryContextSwitchTo(CacheMemoryContext); |
2453 | enumdata = (TypeCacheEnumData *) |
2454 | palloc(offsetof(TypeCacheEnumData, enum_values) + |
2455 | numitems * sizeof(EnumItem)); |
2456 | enumdata->bitmap_base = bitmap_base; |
2457 | enumdata->sorted_values = bms_copy(bitmap); |
2458 | enumdata->num_values = numitems; |
2459 | memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem)); |
2460 | MemoryContextSwitchTo(oldcxt); |
2461 | |
2462 | pfree(items); |
2463 | bms_free(bitmap); |
2464 | |
2465 | /* And link the finished cache struct into the typcache */ |
2466 | if (tcache->enumData != NULL) |
2467 | pfree(tcache->enumData); |
2468 | tcache->enumData = enumdata; |
2469 | } |
2470 | |
2471 | /* |
2472 | * Locate the EnumItem with the given OID, if present |
2473 | */ |
2474 | static EnumItem * |
2475 | find_enumitem(TypeCacheEnumData *enumdata, Oid arg) |
2476 | { |
2477 | EnumItem srch; |
2478 | |
2479 | /* On some versions of Solaris, bsearch of zero items dumps core */ |
2480 | if (enumdata->num_values <= 0) |
2481 | return NULL; |
2482 | |
2483 | srch.enum_oid = arg; |
2484 | return bsearch(&srch, enumdata->enum_values, enumdata->num_values, |
2485 | sizeof(EnumItem), enum_oid_cmp); |
2486 | } |
2487 | |
2488 | /* |
2489 | * qsort comparison function for OID-ordered EnumItems |
2490 | */ |
2491 | static int |
2492 | enum_oid_cmp(const void *left, const void *right) |
2493 | { |
2494 | const EnumItem *l = (const EnumItem *) left; |
2495 | const EnumItem *r = (const EnumItem *) right; |
2496 | |
2497 | if (l->enum_oid < r->enum_oid) |
2498 | return -1; |
2499 | else if (l->enum_oid > r->enum_oid) |
2500 | return 1; |
2501 | else |
2502 | return 0; |
2503 | } |
2504 | |
2505 | /* |
2506 | * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod |
2507 | * to the given value and return a dsa_pointer. |
2508 | */ |
2509 | static dsa_pointer |
2510 | share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod) |
2511 | { |
2512 | dsa_pointer shared_dp; |
2513 | TupleDesc shared; |
2514 | |
2515 | shared_dp = dsa_allocate(area, TupleDescSize(tupdesc)); |
2516 | shared = (TupleDesc) dsa_get_address(area, shared_dp); |
2517 | TupleDescCopy(shared, tupdesc); |
2518 | shared->tdtypmod = typmod; |
2519 | |
2520 | return shared_dp; |
2521 | } |
2522 | |
2523 | /* |
2524 | * If we are attached to a SharedRecordTypmodRegistry, use it to find or |
2525 | * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL. |
2526 | * Tuple descriptors returned by this function are not reference counted, and |
2527 | * will exist at least as long as the current backend remains attached to the
2528 | * current session. |
2529 | */ |
2530 | static TupleDesc |
2531 | find_or_make_matching_shared_tupledesc(TupleDesc tupdesc) |
2532 | { |
2533 | TupleDesc result; |
2534 | SharedRecordTableKey key; |
2535 | SharedRecordTableEntry *record_table_entry; |
2536 | SharedTypmodTableEntry *typmod_table_entry; |
2537 | dsa_pointer shared_dp; |
2538 | bool found; |
2539 | uint32 typmod; |
2540 | |
2541 | /* If not even attached, nothing to do. */ |
2542 | if (CurrentSession->shared_typmod_registry == NULL) |
2543 | return NULL; |
2544 | |
2545 | /* Try to find a matching tuple descriptor in the record table. */ |
2546 | key.shared = false; |
2547 | key.u.local_tupdesc = tupdesc; |
2548 | record_table_entry = (SharedRecordTableEntry *) |
2549 | dshash_find(CurrentSession->shared_record_table, &key, false); |
2550 | if (record_table_entry) |
2551 | { |
2552 | Assert(record_table_entry->key.shared); |
2553 | dshash_release_lock(CurrentSession->shared_record_table, |
2554 | record_table_entry); |
2555 | result = (TupleDesc) |
2556 | dsa_get_address(CurrentSession->area, |
2557 | record_table_entry->key.u.shared_tupdesc); |
2558 | Assert(result->tdrefcount == -1); |
2559 | |
2560 | return result; |
2561 | } |
2562 | |
2563 | /* Allocate a new typmod number. This will be wasted if we error out. */ |
2564 | typmod = (int) |
2565 | pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod, |
2566 | 1); |
2567 | |
2568 | /* Copy the TupleDesc into shared memory. */ |
2569 | shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod); |
2570 | |
2571 | /* |
2572 | * Create an entry in the typmod table so that others will understand this |
2573 | * typmod number. |
2574 | */ |
2575 | PG_TRY(); |
2576 | { |
2577 | typmod_table_entry = (SharedTypmodTableEntry *) |
2578 | dshash_find_or_insert(CurrentSession->shared_typmod_table, |
2579 | &typmod, &found); |
2580 | if (found) |
2581 | elog(ERROR, "cannot create duplicate shared record typmod");
2582 | } |
2583 | PG_CATCH(); |
2584 | { |
2585 | dsa_free(CurrentSession->area, shared_dp); |
2586 | PG_RE_THROW(); |
2587 | } |
2588 | PG_END_TRY(); |
2589 | typmod_table_entry->typmod = typmod; |
2590 | typmod_table_entry->shared_tupdesc = shared_dp; |
2591 | dshash_release_lock(CurrentSession->shared_typmod_table, |
2592 | typmod_table_entry); |
2593 | |
2594 | /* |
2595 | * Finally create an entry in the record table so others with matching |
2596 | * tuple descriptors can reuse the typmod. |
2597 | */ |
2598 | record_table_entry = (SharedRecordTableEntry *) |
2599 | dshash_find_or_insert(CurrentSession->shared_record_table, &key, |
2600 | &found); |
2601 | if (found) |
2602 | { |
2603 | /* |
2604 | * Someone concurrently inserted a matching tuple descriptor since the |
2605 | * first time we checked. Use that one instead. |
2606 | */ |
2607 | dshash_release_lock(CurrentSession->shared_record_table, |
2608 | record_table_entry); |
2609 | |
2610 | /* Might as well free up the space used by the one we created. */ |
2611 | found = dshash_delete_key(CurrentSession->shared_typmod_table, |
2612 | &typmod); |
2613 | Assert(found); |
2614 | dsa_free(CurrentSession->area, shared_dp); |
2615 | |
2616 | /* Return the one we found. */ |
2617 | Assert(record_table_entry->key.shared); |
2618 | result = (TupleDesc) |
2619 | dsa_get_address(CurrentSession->area, |
2620 | record_table_entry->key.u.shared_tupdesc);
2621 | Assert(result->tdrefcount == -1); |
2622 | |
2623 | return result; |
2624 | } |
2625 | |
2626 | /* Store it and return it. */ |
2627 | record_table_entry->key.shared = true; |
2628 | record_table_entry->key.u.shared_tupdesc = shared_dp; |
2629 | dshash_release_lock(CurrentSession->shared_record_table, |
2630 | record_table_entry); |
2631 | result = (TupleDesc) |
2632 | dsa_get_address(CurrentSession->area, shared_dp); |
2633 | Assert(result->tdrefcount == -1); |
2634 | |
2635 | return result; |
2636 | } |
2637 | |
2638 | /* |
2639 | * On-DSM-detach hook to forget about the current shared record typmod |
2640 | * infrastructure. This is currently used by both leader and workers. |
2641 | */ |
2642 | static void |
2643 | shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum) |
2644 | { |
2645 | /* Be cautious here: maybe we didn't finish initializing. */ |
2646 | if (CurrentSession->shared_record_table != NULL) |
2647 | { |
2648 | dshash_detach(CurrentSession->shared_record_table); |
2649 | CurrentSession->shared_record_table = NULL; |
2650 | } |
2651 | if (CurrentSession->shared_typmod_table != NULL) |
2652 | { |
2653 | dshash_detach(CurrentSession->shared_typmod_table); |
2654 | CurrentSession->shared_typmod_table = NULL; |
2655 | } |
2656 | CurrentSession->shared_typmod_registry = NULL; |
2657 | } |
2658 | |