/*
 * info.c
 *
 * information support functions
 *
 * Copyright (c) 2010-2019, PostgreSQL Global Development Group
 * src/bin/pg_upgrade/info.c
 */

#include "postgres_fe.h"

#include "pg_upgrade.h"

#include "access/transam.h"
#include "catalog/pg_class_d.h"


static void create_rel_filename_map(const char *old_data, const char *new_data,
                                    const DbInfo *old_db, const DbInfo *new_db,
                                    const RelInfo *old_rel, const RelInfo *new_rel,
                                    FileNameMap *map);
static void report_unmatched_relation(const RelInfo *rel, const DbInfo *db,
                                      bool is_new_db);
static void free_db_and_rel_infos(DbInfoArr *db_arr);
static void get_db_infos(ClusterInfo *cluster);
static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo);
static void free_rel_infos(RelInfoArr *rel_arr);
static void print_db_infos(DbInfoArr *dbinfo);
static void print_rel_infos(RelInfoArr *rel_arr);


/*
 * gen_db_file_maps()
 *
 * generates a database mapping from "old_db" to "new_db".
 *
 * Returns a malloc'ed array of mappings.  The length of the array
 * is returned into *nmaps.
 */
FileNameMap *
gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
                 int *nmaps,
                 const char *old_pgdata, const char *new_pgdata)
{
    FileNameMap *maps;
    int         old_relnum,
                new_relnum;
    int         num_maps = 0;
    bool        all_matched = true;

    /* There will certainly not be more mappings than there are old rels */
    maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
                                     old_db->rel_arr.nrels);

    /*
     * Each of the RelInfo arrays should be sorted by OID.  Scan through them
     * and match them up.  If we fail to match everything, we'll abort, but
     * first print as much info as we can about mismatches.
     */
    old_relnum = new_relnum = 0;
    while (old_relnum < old_db->rel_arr.nrels ||
           new_relnum < new_db->rel_arr.nrels)
    {
        RelInfo    *old_rel = (old_relnum < old_db->rel_arr.nrels) ?
            &old_db->rel_arr.rels[old_relnum] : NULL;
        RelInfo    *new_rel = (new_relnum < new_db->rel_arr.nrels) ?
            &new_db->rel_arr.rels[new_relnum] : NULL;

        /* handle running off one array before the other */
        if (!new_rel)
        {
            /*
             * old_rel is unmatched.  This should never happen, because we
             * force new rels to have TOAST tables if the old one did.
             */
            report_unmatched_relation(old_rel, old_db, false);
            all_matched = false;
            old_relnum++;
            continue;
        }
        if (!old_rel)
        {
            /*
             * new_rel is unmatched.  This shouldn't really happen either, but
             * if it's a TOAST table, we can ignore it and continue
             * processing, assuming that the new server made a TOAST table
             * that wasn't needed.
             */
            if (strcmp(new_rel->nspname, "pg_toast") != 0)
            {
                report_unmatched_relation(new_rel, new_db, true);
                all_matched = false;
            }
            new_relnum++;
            continue;
        }

        /* check for mismatched OID */
        if (old_rel->reloid < new_rel->reloid)
        {
            /* old_rel is unmatched, see comment above */
            report_unmatched_relation(old_rel, old_db, false);
            all_matched = false;
            old_relnum++;
            continue;
        }
        else if (old_rel->reloid > new_rel->reloid)
        {
            /* new_rel is unmatched, see comment above */
            if (strcmp(new_rel->nspname, "pg_toast") != 0)
            {
                report_unmatched_relation(new_rel, new_db, true);
                all_matched = false;
            }
            new_relnum++;
            continue;
        }

        /*
         * Verify that rels of same OID have same name.  The namespace name
         * should always match, but the relname might not match for TOAST
         * tables (and, therefore, their indexes).
         *
         * TOAST table names initially match the heap pg_class oid, but
         * pre-9.0 they can change during certain commands such as CLUSTER,
         * so don't insist on a match if old cluster is < 9.0.
         */
        if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
            (strcmp(old_rel->relname, new_rel->relname) != 0 &&
             (GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
              strcmp(old_rel->nspname, "pg_toast") != 0)))
        {
            pg_log(PG_WARNING, "Relation names for OID %u in database \"%s\" do not match: "
                   "old name \"%s.%s\", new name \"%s.%s\"\n",
                   old_rel->reloid, old_db->db_name,
                   old_rel->nspname, old_rel->relname,
                   new_rel->nspname, new_rel->relname);
            all_matched = false;
            old_relnum++;
            new_relnum++;
            continue;
        }

        /* OK, create a mapping entry */
        create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
                                old_rel, new_rel, maps + num_maps);
        num_maps++;
        old_relnum++;
        new_relnum++;
    }

    if (!all_matched)
        pg_fatal("Failed to match up old and new tables in database \"%s\"\n",
                 old_db->db_name);

    *nmaps = num_maps;
    return maps;
}
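
/*
 * Usage sketch (illustrative only; the actual caller lives elsewhere in
 * pg_upgrade and the variable names below are hypothetical):
 *
 *    int          n_maps;
 *    FileNameMap *maps = gen_db_file_maps(old_db, new_db, &n_maps,
 *                                         old_pgdata, new_pgdata);
 *
 *    print_maps(maps, n_maps, new_db->db_name);
 *    ... file transfer work would then walk maps[0 .. n_maps-1] ...
 *    pg_free(maps);
 */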


/*
 * create_rel_filename_map()
 *
 * fills a file node map structure and returns it in "map".
 */
static void
create_rel_filename_map(const char *old_data, const char *new_data,
                        const DbInfo *old_db, const DbInfo *new_db,
                        const RelInfo *old_rel, const RelInfo *new_rel,
                        FileNameMap *map)
{
    /* In case old/new tablespaces don't match, do them separately. */
    if (strlen(old_rel->tablespace) == 0)
    {
        /*
         * relation belongs to the default tablespace, hence relfiles should
         * exist in the data directories.
         */
        map->old_tablespace = old_data;
        map->old_tablespace_suffix = "/base";
    }
    else
    {
        /* relation belongs to a tablespace, so use the tablespace location */
        map->old_tablespace = old_rel->tablespace;
        map->old_tablespace_suffix = old_cluster.tablespace_suffix;
    }

    /* Do the same for new tablespaces */
    if (strlen(new_rel->tablespace) == 0)
    {
        map->new_tablespace = new_data;
        map->new_tablespace_suffix = "/base";
    }
    else
    {
        map->new_tablespace = new_rel->tablespace;
        map->new_tablespace_suffix = new_cluster.tablespace_suffix;
    }

    map->old_db_oid = old_db->db_oid;
    map->new_db_oid = new_db->db_oid;

    /*
     * old_relfilenode might differ from pg_class.oid (and hence
     * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
     */
    map->old_relfilenode = old_rel->relfilenode;

    /* new_relfilenode will match old and new pg_class.oid */
    map->new_relfilenode = new_rel->relfilenode;

    /* used only for logging and error reporting, old/new are identical */
    map->nspname = old_rel->nspname;
    map->relname = old_rel->relname;
}


/*
 * Complain about a relation we couldn't match to the other database,
 * identifying it as best we can.
 */
static void
report_unmatched_relation(const RelInfo *rel, const DbInfo *db, bool is_new_db)
{
    Oid         reloid = rel->reloid;   /* we might change rel below */
    char        reldesc[1000];
    int         i;

    snprintf(reldesc, sizeof(reldesc), "\"%s.%s\"",
             rel->nspname, rel->relname);
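    /* If it's an index, identify the table it is on (if we can find it). */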
    if (rel->indtable)
    {
        for (i = 0; i < db->rel_arr.nrels; i++)
        {
            const RelInfo *hrel = &db->rel_arr.rels[i];

            if (hrel->reloid == rel->indtable)
            {
                snprintf(reldesc + strlen(reldesc),
                         sizeof(reldesc) - strlen(reldesc),
                         _(" which is an index on \"%s.%s\""),
                         hrel->nspname, hrel->relname);
                /* Shift attention to index's table for toast check */
                rel = hrel;
                break;
            }
        }
        if (i >= db->rel_arr.nrels)
            snprintf(reldesc + strlen(reldesc),
                     sizeof(reldesc) - strlen(reldesc),
                     _(" which is an index on OID %u"), rel->indtable);
    }
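    /* If it's a TOAST table, identify the heap table it belongs to. */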
    if (rel->toastheap)
    {
        for (i = 0; i < db->rel_arr.nrels; i++)
        {
            const RelInfo *brel = &db->rel_arr.rels[i];

            if (brel->reloid == rel->toastheap)
            {
                snprintf(reldesc + strlen(reldesc),
                         sizeof(reldesc) - strlen(reldesc),
                         _(" which is the TOAST table for \"%s.%s\""),
                         brel->nspname, brel->relname);
                break;
            }
        }
        if (i >= db->rel_arr.nrels)
            snprintf(reldesc + strlen(reldesc),
                     sizeof(reldesc) - strlen(reldesc),
                     _(" which is the TOAST table for OID %u"), rel->toastheap);
    }

    if (is_new_db)
        pg_log(PG_WARNING, "No match found in old cluster for new relation with OID %u in database \"%s\": %s\n",
               reloid, db->db_name, reldesc);
    else
        pg_log(PG_WARNING, "No match found in new cluster for old relation with OID %u in database \"%s\": %s\n",
               reloid, db->db_name, reldesc);
}


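/*
 * print_maps()
 *
 * In verbose mode, log the old-to-new relfilenode mapping for each relation
 * in the given database.
 */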
void
print_maps(FileNameMap *maps, int n_maps, const char *db_name)
{
    if (log_opts.verbose)
    {
        int         mapnum;

        pg_log(PG_VERBOSE, "mappings for database \"%s\":\n", db_name);

        for (mapnum = 0; mapnum < n_maps; mapnum++)
            pg_log(PG_VERBOSE, "%s.%s: %u to %u\n",
                   maps[mapnum].nspname, maps[mapnum].relname,
                   maps[mapnum].old_relfilenode,
                   maps[mapnum].new_relfilenode);

        pg_log(PG_VERBOSE, "\n\n");
    }
}


/*
 * get_db_and_rel_infos()
 *
 * higher level routine to generate dbinfos for the database running
 * on the given "port".  Assumes that server is already running.
 */
void
get_db_and_rel_infos(ClusterInfo *cluster)
{
    int         dbnum;

    if (cluster->dbarr.dbs != NULL)
        free_db_and_rel_infos(&cluster->dbarr);

    get_db_infos(cluster);

    for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
        get_rel_infos(cluster, &cluster->dbarr.dbs[dbnum]);

    if (cluster == &old_cluster)
        pg_log(PG_VERBOSE, "\nsource databases:\n");
    else
        pg_log(PG_VERBOSE, "\ntarget databases:\n");

    if (log_opts.verbose)
        print_db_infos(&cluster->dbarr);
}


/*
 * get_db_infos()
 *
 * Scans pg_database system catalog and populates all user
 * databases.
 */
static void
get_db_infos(ClusterInfo *cluster)
{
    PGconn     *conn = connectToServer(cluster, "template1");
    PGresult   *res;
    int         ntups;
    int         tupnum;
    DbInfo     *dbinfos;
    int         i_datname,
                i_oid,
                i_encoding,
                i_datcollate,
                i_datctype,
                i_spclocation;
    char        query[QUERY_ALLOC];

    snprintf(query, sizeof(query),
             "SELECT d.oid, d.datname, d.encoding, d.datcollate, d.datctype, "
             "%s AS spclocation "
             "FROM pg_catalog.pg_database d "
             " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
             " ON d.dattablespace = t.oid "
             "WHERE d.datallowconn = true "
    /* we don't preserve pg_database.oid so we sort by name */
             "ORDER BY 2",
    /* 9.2 removed the spclocation column */
             (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
             "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid)");

    res = executeQueryOrDie(conn, "%s", query);

    i_oid = PQfnumber(res, "oid");
    i_datname = PQfnumber(res, "datname");
    i_encoding = PQfnumber(res, "encoding");
    i_datcollate = PQfnumber(res, "datcollate");
    i_datctype = PQfnumber(res, "datctype");
    i_spclocation = PQfnumber(res, "spclocation");

    ntups = PQntuples(res);
    dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups);
    for (tupnum = 0; tupnum < ntups; tupnum++)
    {
        dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
        dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname));
        dbinfos[tupnum].db_encoding = atoi(PQgetvalue(res, tupnum, i_encoding));
        dbinfos[tupnum].db_collate = pg_strdup(PQgetvalue(res, tupnum, i_datcollate));
        dbinfos[tupnum].db_ctype = pg_strdup(PQgetvalue(res, tupnum, i_datctype));
        snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s",
                 PQgetvalue(res, tupnum, i_spclocation));
    }
    PQclear(res);

    PQfinish(conn);

    cluster->dbarr.dbs = dbinfos;
    cluster->dbarr.ndbs = ntups;
}


/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables and indexes of the database
 * referred to by "dbinfo".
 *
 * Note: the resulting RelInfo array is assumed to be sorted by OID.
 * This allows later processing to match up old and new databases efficiently.
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
    PGconn     *conn = connectToServer(cluster,
                                       dbinfo->db_name);
    PGresult   *res;
    RelInfo    *relinfos;
    int         ntups;
    int         relnum;
    int         num_rels = 0;
    char       *nspname = NULL;
    char       *relname = NULL;
    char       *tablespace = NULL;
    int         i_spclocation,
                i_nspname,
                i_relname,
                i_reloid,
                i_indtable,
                i_toastheap,
                i_relfilenode,
                i_reltablespace;
    char        query[QUERY_ALLOC];
    char       *last_namespace = NULL,
               *last_tablespace = NULL;

    query[0] = '\0';            /* initialize query string to empty */

    /*
     * Create a CTE that collects OIDs of regular user tables and matviews,
     * but excluding toast tables and indexes.  We assume that relations with
     * OIDs >= FirstNormalObjectId belong to the user.  (That's probably
     * redundant with the namespace-name exclusions, but let's be safe.)
     *
     * pg_largeobject contains user data that does not appear in pg_dump
     * output, so we have to copy that system table.  It's easiest to do that
     * by treating it as a user table.
     */
    snprintf(query + strlen(query), sizeof(query) - strlen(query),
             "WITH regular_heap (reloid, indtable, toastheap) AS ( "
             " SELECT c.oid, 0::oid, 0::oid "
             " FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
             " ON c.relnamespace = n.oid "
             " WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ", "
             CppAsString2(RELKIND_MATVIEW) ") AND "
    /* exclude possible orphaned temp tables */
             " ((n.nspname !~ '^pg_temp_' AND "
             " n.nspname !~ '^pg_toast_temp_' AND "
             " n.nspname NOT IN ('pg_catalog', 'information_schema', "
             " 'binary_upgrade', 'pg_toast') AND "
             " c.oid >= %u::pg_catalog.oid) OR "
             " (n.nspname = 'pg_catalog' AND "
             " relname IN ('pg_largeobject') ))), ",
             FirstNormalObjectId);

    /*
     * Add a CTE that collects OIDs of toast tables belonging to the tables
     * selected by the regular_heap CTE.  (We have to do this separately
     * because the namespace-name rules above don't work for toast tables.)
     */
    snprintf(query + strlen(query), sizeof(query) - strlen(query),
             " toast_heap (reloid, indtable, toastheap) AS ( "
             " SELECT c.reltoastrelid, 0::oid, c.oid "
             " FROM regular_heap JOIN pg_catalog.pg_class c "
             " ON regular_heap.reloid = c.oid "
             " WHERE c.reltoastrelid != 0), ");

    /*
     * Add a CTE that collects OIDs of all valid indexes on the previously
     * selected tables.  We can ignore invalid indexes since pg_dump does.
     * Testing indisready is necessary in 9.2, and harmless in earlier/later
     * versions.
     */
    snprintf(query + strlen(query), sizeof(query) - strlen(query),
             " all_index (reloid, indtable, toastheap) AS ( "
             " SELECT indexrelid, indrelid, 0::oid "
             " FROM pg_catalog.pg_index "
             " WHERE indisvalid AND indisready "
             " AND indrelid IN "
             " (SELECT reloid FROM regular_heap "
             " UNION ALL "
             " SELECT reloid FROM toast_heap)) ");

    /*
     * And now we can write the query that retrieves the data we want for
     * each heap and index relation.  Make sure result is sorted by OID.
     */
    snprintf(query + strlen(query), sizeof(query) - strlen(query),
             "SELECT all_rels.*, n.nspname, c.relname, "
             " c.relfilenode, c.reltablespace, %s "
             "FROM (SELECT * FROM regular_heap "
             " UNION ALL "
             " SELECT * FROM toast_heap "
             " UNION ALL "
             " SELECT * FROM all_index) all_rels "
             " JOIN pg_catalog.pg_class c "
             " ON all_rels.reloid = c.oid "
             " JOIN pg_catalog.pg_namespace n "
             " ON c.relnamespace = n.oid "
             " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
             " ON c.reltablespace = t.oid "
             "ORDER BY 1;",
    /* 9.2 removed the pg_tablespace.spclocation column */
             (GET_MAJOR_VERSION(cluster->major_version) >= 902) ?
             "pg_catalog.pg_tablespace_location(t.oid) AS spclocation" :
             "t.spclocation");

    res = executeQueryOrDie(conn, "%s", query);

    ntups = PQntuples(res);

    relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

    i_reloid = PQfnumber(res, "reloid");
    i_indtable = PQfnumber(res, "indtable");
    i_toastheap = PQfnumber(res, "toastheap");
    i_nspname = PQfnumber(res, "nspname");
    i_relname = PQfnumber(res, "relname");
    i_relfilenode = PQfnumber(res, "relfilenode");
    i_reltablespace = PQfnumber(res, "reltablespace");
    i_spclocation = PQfnumber(res, "spclocation");

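    /* Extract each result row into a RelInfo entry. */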
    for (relnum = 0; relnum < ntups; relnum++)
    {
        RelInfo    *curr = &relinfos[num_rels++];

        curr->reloid = atooid(PQgetvalue(res, relnum, i_reloid));
        curr->indtable = atooid(PQgetvalue(res, relnum, i_indtable));
        curr->toastheap = atooid(PQgetvalue(res, relnum, i_toastheap));

        nspname = PQgetvalue(res, relnum, i_nspname);
        curr->nsp_alloc = false;

        /*
         * Many of the namespace and tablespace strings are identical, so we
         * try to reuse the allocated string pointers where possible to
         * reduce memory consumption.
         */
        /* Can we reuse the previous string allocation? */
        if (last_namespace && strcmp(nspname, last_namespace) == 0)
            curr->nspname = last_namespace;
        else
        {
            last_namespace = curr->nspname = pg_strdup(nspname);
            curr->nsp_alloc = true;
        }

        relname = PQgetvalue(res, relnum, i_relname);
        curr->relname = pg_strdup(relname);

        curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
        curr->tblsp_alloc = false;

        /* Is the tablespace oid non-default? */
        if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
        {
            /*
             * The tablespace location might be "", meaning the cluster
             * default location, i.e. pg_default or pg_global.
             */
            tablespace = PQgetvalue(res, relnum, i_spclocation);

            /* Can we reuse the previous string allocation? */
            if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
                curr->tablespace = last_tablespace;
            else
            {
                last_tablespace = curr->tablespace = pg_strdup(tablespace);
                curr->tblsp_alloc = true;
            }
        }
        else
            /* A zero reltablespace oid indicates the database tablespace. */
            curr->tablespace = dbinfo->db_tablespace;
    }
    PQclear(res);

    PQfinish(conn);

    dbinfo->rel_arr.rels = relinfos;
    dbinfo->rel_arr.nrels = num_rels;
}


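/*
 * free_db_and_rel_infos()
 *
 * Frees a DbInfo array, including each database's RelInfo array and name.
 */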
static void
free_db_and_rel_infos(DbInfoArr *db_arr)
{
    int         dbnum;

    for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++)
    {
        free_rel_infos(&db_arr->dbs[dbnum].rel_arr);
        pg_free(db_arr->dbs[dbnum].db_name);
    }
    pg_free(db_arr->dbs);
    db_arr->dbs = NULL;
    db_arr->ndbs = 0;
}


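/*
 * free_rel_infos()
 *
 * Frees a RelInfo array; namespace and tablespace strings are released only
 * when they were separately allocated (per the nsp_alloc/tblsp_alloc flags).
 */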
static void
free_rel_infos(RelInfoArr *rel_arr)
{
    int         relnum;

    for (relnum = 0; relnum < rel_arr->nrels; relnum++)
    {
        if (rel_arr->rels[relnum].nsp_alloc)
            pg_free(rel_arr->rels[relnum].nspname);
        pg_free(rel_arr->rels[relnum].relname);
        if (rel_arr->rels[relnum].tblsp_alloc)
            pg_free(rel_arr->rels[relnum].tablespace);
    }
    pg_free(rel_arr->rels);
    rel_arr->nrels = 0;
}


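/*
 * print_db_infos()
 *
 * Logs (at verbose level) each database and its relations.
 */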
static void
print_db_infos(DbInfoArr *db_arr)
{
    int         dbnum;

    for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++)
    {
        pg_log(PG_VERBOSE, "Database: %s\n", db_arr->dbs[dbnum].db_name);
        print_rel_infos(&db_arr->dbs[dbnum].rel_arr);
        pg_log(PG_VERBOSE, "\n\n");
    }
}


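/*
 * print_rel_infos()
 *
 * Logs (at verbose level) one line per relation: schema-qualified name,
 * OID, and tablespace.
 */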
static void
print_rel_infos(RelInfoArr *rel_arr)
{
    int         relnum;

    for (relnum = 0; relnum < rel_arr->nrels; relnum++)
        pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
               rel_arr->rels[relnum].nspname,
               rel_arr->rels[relnum].relname,
               rel_arr->rels[relnum].reloid,
               rel_arr->rels[relnum].tablespace);
}