/*
 * pg_upgrade.c
 *
 * main source file
 *
 * Copyright (c) 2010-2019, PostgreSQL Global Development Group
 * src/bin/pg_upgrade/pg_upgrade.c
 */

/*
 * To simplify the upgrade process, we force certain system values to be
 * identical between old and new clusters:
 *
 * We control all assignments of pg_class.oid (and relfilenode) so toast
 * oids are the same between old and new clusters.  This is important
 * because toast oids are stored as toast pointers in user tables.
 *
 * While pg_class.oid and pg_class.relfilenode are initially the same
 * in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
 * FULL.  In the new cluster, pg_class.oid and pg_class.relfilenode will
 * be the same and will match the old pg_class.oid value.  Because of
 * this, old/new pg_class.relfilenode values will not match if CLUSTER,
 * REINDEX, or VACUUM FULL have been performed in the old cluster.
 *
 * We control all assignments of pg_type.oid because these oids are stored
 * in user composite type values.
 *
 * We control all assignments of pg_enum.oid because these oids are stored
 * in user tables as enum values.
 *
 * We control all assignments of pg_authid.oid for historical reasons (the
 * oids used to be stored in pg_largeobject_metadata, which is now copied
 * via SQL commands); that might change at some point in the future.
 */


#include "postgres_fe.h"

#include "pg_upgrade.h"
#include "catalog/pg_class_d.h"
#include "common/file_perm.h"
#include "common/logging.h"
#include "common/restricted_token.h"
#include "fe_utils/string_utils.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

static void prepare_new_cluster(void);
static void prepare_new_globals(void);
static void create_new_objects(void);
static void copy_xact_xlog_xid(void);
static void set_frozenxids(bool minmxid_only);
static void setup(char *argv0, bool *live_check);
static void cleanup(void);

ClusterInfo old_cluster,
            new_cluster;
OSInfo      os_info;

char       *output_files[] = {
    SERVER_LOG_FILE,
#ifdef WIN32
    /* unique file for pg_ctl start */
    SERVER_START_LOG_FILE,
#endif
    UTILITY_LOG_FILE,
    INTERNAL_LOG_FILE,
    NULL
};


int
main(int argc, char **argv)
{
    char       *analyze_script_file_name = NULL;
    char       *deletion_script_file_name = NULL;
    bool        live_check = false;

    pg_logging_init(argv[0]);
    set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_upgrade"));

    /* Set default restrictive mask until new cluster permissions are read */
    umask(PG_MODE_MASK_OWNER);

    parseCommandLine(argc, argv);

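    /*
     * On Windows, get_restricted_token() re-executes pg_upgrade under a
     * restricted security token so we do not run with Administrator
     * privileges; on other platforms it is a no-op.
     */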
    get_restricted_token();

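    /*
     * Resolve the real data directory of each cluster, in case the user
     * pointed us at a configuration-only directory.
     */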
    adjust_data_dir(&old_cluster);
    adjust_data_dir(&new_cluster);

    setup(argv[0], &live_check);

    output_check_banner(live_check);

    check_cluster_versions();

    get_sock_dir(&old_cluster, live_check);
    get_sock_dir(&new_cluster, false);

    check_cluster_compatibility(live_check);

    /* Set mask based on PGDATA permissions */
    if (!GetDataDirectoryCreatePerm(new_cluster.pgdata))
    {
        pg_log(PG_FATAL, "could not read permissions of directory \"%s\": %s\n",
               new_cluster.pgdata, strerror(errno));
        exit(1);
    }

    umask(pg_mode_mask);

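    /*
     * Run the pre-upgrade compatibility checks against the old cluster and
     * dump its schema.  Only schema and global objects are dumped; user data
     * is transferred at the file level later.
     */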
    check_and_dump_old_cluster(live_check);

    /* -- NEW -- */
    start_postmaster(&new_cluster, true);

    check_new_cluster();
    report_clusters_compatible();

    pg_log(PG_REPORT,
           "\n"
           "Performing Upgrade\n"
           "------------------\n");

    prepare_new_cluster();

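    /*
     * Stop the new server: the steps below (pg_resetwal, file copies) must
     * run against a shut-down cluster.
     */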
    stop_postmaster(false);

    /*
     * Destructive Changes to New Cluster
     */

    copy_xact_xlog_xid();

    /* The new cluster is now using the transaction IDs of the old system */

    /* -- NEW -- */
    start_postmaster(&new_cluster, true);

    prepare_new_globals();

    create_new_objects();

    stop_postmaster(false);

    /*
     * Most failures happen in create_new_objects(), which has completed at
     * this point.  We do this here because it is just before linking, which
     * will link the old and new cluster data files, preventing the old
     * cluster from being safely started once the new cluster is started.
     */
    if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
        disable_old_cluster();

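    /*
     * Transfer the user relation files from the old cluster to the new one,
     * copying or linking them according to the user-selected transfer mode.
     */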
    transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
                                 old_cluster.pgdata, new_cluster.pgdata);

    /*
     * Assuming OIDs are only used in system tables, there is no need to
     * restore the OID counter because we have not transferred any OIDs from
     * the old system, but we do it anyway just in case.  We do it late here
     * because there is no need to have the schema load use new oids.
     */
    prep_status("Setting next OID for new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/pg_resetwal\" -o %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
              new_cluster.pgdata);
    check_ok();

    prep_status("Sync data directory to disk");
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
              new_cluster.pgdata);
    check_ok();

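    /*
     * Create convenience scripts the user can run afterwards: one to rebuild
     * optimizer statistics in the new cluster, and one to delete the old
     * cluster's data files once they are no longer needed.
     */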
    create_script_for_cluster_analyze(&analyze_script_file_name);
    create_script_for_old_cluster_deletion(&deletion_script_file_name);

    issue_warnings_and_set_wal_level();

    pg_log(PG_REPORT,
           "\n"
           "Upgrade Complete\n"
           "----------------\n");

    output_completion_banner(analyze_script_file_name,
                             deletion_script_file_name);

    pg_free(analyze_script_file_name);
    pg_free(deletion_script_file_name);

    cleanup();

    return 0;
}


static void
setup(char *argv0, bool *live_check)
{
    char        exec_path[MAXPGPATH];   /* full path to my executable */

    /*
     * Make sure the user has a clean environment; otherwise, we may confuse
     * libpq when we connect to one (or both) of the servers.
     */
    check_pghost_envvar();

    verify_directories();

    /* no postmasters should be running, except for a live check */
    if (pid_lock_file_exists(old_cluster.pgdata))
    {
        /*
         * If we have a postmaster.pid file, try to start the server.  If it
         * starts, the pid file was stale, so stop the server.  If it doesn't
         * start, assume the server is running.  If the pid file is left over
         * from a server crash, this also allows any committed transactions
         * stored in the WAL to be replayed so they are not lost, because WAL
         * files are not transferred from old to new servers.  We later check
         * for a clean shutdown.
         */
        if (start_postmaster(&old_cluster, false))
            stop_postmaster(false);
        else
        {
            if (!user_opts.check)
                pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
                         "Please shut down that postmaster and try again.\n");
            else
                *live_check = true;
        }
    }

    /* same goes for the new postmaster */
    if (pid_lock_file_exists(new_cluster.pgdata))
    {
        if (start_postmaster(&new_cluster, false))
            stop_postmaster(false);
        else
            pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
                     "Please shut down that postmaster and try again.\n");
    }

    /* get path to pg_upgrade executable */
    if (find_my_exec(argv0, exec_path) < 0)
        pg_fatal("%s: could not find own program executable\n", argv0);

    /* Trim off program name and keep just path */
    *last_dir_separator(exec_path) = '\0';
    canonicalize_path(exec_path);
    os_info.exec_path = pg_strdup(exec_path);
}


static void
prepare_new_cluster(void)
{
    /*
     * It would make more sense to freeze after loading the schema, but that
     * would cause us to lose the frozenxids restored by the load.  We use
     * --analyze so autovacuum doesn't update statistics later.
     */
    prep_status("Analyzing all rows in the new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/vacuumdb\" %s --all --analyze %s",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              log_opts.verbose ? "--verbose" : "");
    check_ok();

    /*
     * We freeze after analyze so pg_statistic is also frozen.  template0 is
     * not frozen here, but its data rows were frozen by initdb, and we later
     * set its datfrozenxid, relfrozenxids, and relminmxid to match the new
     * xid counter.
     */
    prep_status("Freezing all rows in the new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/vacuumdb\" %s --all --freeze %s",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              log_opts.verbose ? "--verbose" : "");
    check_ok();
}


static void
prepare_new_globals(void)
{
    /*
     * Before we restore anything, set frozenxids of initdb-created tables.
     */
    set_frozenxids(false);

    /*
     * Now restore global objects (roles and tablespaces).
     */
    prep_status("Restoring global objects in the new cluster");

    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              GLOBALS_DUMP_FILE);
    check_ok();
}


static void
create_new_objects(void)
{
    int         dbnum;

    prep_status("Restoring database schemas in the new cluster\n");

    /*
     * We cannot process the template1 database concurrently with others,
     * because when it's transiently dropped, connection attempts would fail.
     * So handle it in a separate non-parallelized pass.
     */
    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char        sql_file_name[MAXPGPATH],
                    log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
        const char *create_opts;

        /* Process only template1 in this pass */
        if (strcmp(old_db->db_name, "template1") != 0)
            continue;

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        /*
         * The template1 database will already exist in the target
         * installation, so tell pg_restore to drop and recreate it;
         * otherwise we would fail to propagate its database-level
         * properties.
         */
        create_opts = "--clean --create";

        exec_prog(log_file_name,
                  NULL,
                  true,
                  true,
                  "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
                  "--dbname postgres \"%s\"",
                  new_cluster.bindir,
                  cluster_conn_opts(&new_cluster),
                  create_opts,
                  sql_file_name);

        break;                  /* done once we've processed template1 */
    }

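    /*
     * Now restore the remaining databases.  These restores are independent
     * of each other, so they can run in parallel; parallel_exec_prog()
     * spreads them across the number of processes given by --jobs.
     */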
    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char        sql_file_name[MAXPGPATH],
                    log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
        const char *create_opts;

        /* Skip template1 in this pass */
        if (strcmp(old_db->db_name, "template1") == 0)
            continue;

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        /*
         * The postgres database will already exist in the target
         * installation, so tell pg_restore to drop and recreate it;
         * otherwise we would fail to propagate its database-level
         * properties.
         */
        if (strcmp(old_db->db_name, "postgres") == 0)
            create_opts = "--clean --create";
        else
            create_opts = "--create";

        parallel_exec_prog(log_file_name,
                           NULL,
                           "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
                           "--dbname template1 \"%s\"",
                           new_cluster.bindir,
                           cluster_conn_opts(&new_cluster),
                           create_opts,
                           sql_file_name);
    }

    /* reap all children */
    while (reap_child(true) == true)
        ;

    end_progress_output();
    check_ok();

    /*
     * We don't have minmxids for databases or relations in pre-9.3 clusters,
     * so set those after we have restored the schema.
     */
    if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
        set_frozenxids(true);

    /* update new_cluster info now that we have objects in the databases */
    get_db_and_rel_infos(&new_cluster);
}

/*
 * Delete the given subdirectory contents from the new cluster
 */
static void
remove_new_subdir(const char *subdir, bool rmtopdir)
{
    char        new_path[MAXPGPATH];

    prep_status("Deleting files from new %s", subdir);

    snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
    if (!rmtree(new_path, rmtopdir))
        pg_fatal("could not delete directory \"%s\"\n", new_path);

    check_ok();
}

/*
 * Copy the files from the old cluster's subdirectory into the new cluster
 */
static void
copy_subdir_files(const char *old_subdir, const char *new_subdir)
{
    char        old_path[MAXPGPATH];
    char        new_path[MAXPGPATH];

    remove_new_subdir(new_subdir, true);

    snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, old_subdir);
    snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, new_subdir);

    prep_status("Copying old %s to new server", old_subdir);

    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
#ifndef WIN32
              "cp -Rf \"%s\" \"%s\"",
#else
    /* flags: everything, no confirm, quiet, overwrite read-only */
              "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
              old_path, new_path);

    check_ok();
}

static void
copy_xact_xlog_xid(void)
{
    /*
     * Copy old commit logs to new data dir.  pg_clog has been renamed to
     * pg_xact in post-10 clusters.
     */
    copy_subdir_files(GET_MAJOR_VERSION(old_cluster.major_version) < 1000 ?
                      "pg_clog" : "pg_xact",
                      GET_MAJOR_VERSION(new_cluster.major_version) < 1000 ?
                      "pg_clog" : "pg_xact");

    /* set the next transaction id and epoch of the new cluster */
    prep_status("Setting next transaction ID and epoch for new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/pg_resetwal\" -f -x %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
              new_cluster.pgdata);
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/pg_resetwal\" -f -e %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,
              new_cluster.pgdata);
    /* must reset commit timestamp limits also */
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
              "\"%s/pg_resetwal\" -f -c %u,%u \"%s\"",
              new_cluster.bindir,
              old_cluster.controldata.chkpnt_nxtxid,
              old_cluster.controldata.chkpnt_nxtxid,
              new_cluster.pgdata);
    check_ok();

    /*
     * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change
     * (see pg_upgrade.h) and the new server is after, then we don't copy
     * pg_multixact files, but we need to reset pg_control so that the new
     * server doesn't attempt to read multis older than the cutoff value.
     */
    if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER &&
        new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
    {
        copy_subdir_files("pg_multixact/offsets", "pg_multixact/offsets");
        copy_subdir_files("pg_multixact/members", "pg_multixact/members");

        prep_status("Setting next multixact ID and offset for new cluster");

        /*
         * We preserve all pg_multixact files and contents, so we must
         * preserve both "next" counters here and the oldest multi present
         * on the system.
         */
        exec_prog(UTILITY_LOG_FILE, NULL, true, true,
                  "\"%s/pg_resetwal\" -O %u -m %u,%u \"%s\"",
                  new_cluster.bindir,
                  old_cluster.controldata.chkpnt_nxtmxoff,
                  old_cluster.controldata.chkpnt_nxtmulti,
                  old_cluster.controldata.chkpnt_oldstMulti,
                  new_cluster.pgdata);
        check_ok();
    }
    else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
    {
        /*
         * Remove offsets/0000 file created by initdb that no longer matches
         * the new multi-xid value.  "members" starts at zero so no need to
         * remove it.
         */
        remove_new_subdir("pg_multixact/offsets", false);

        prep_status("Setting oldest multixact ID in new cluster");

        /*
         * We don't preserve files in this case, but it's important that the
         * oldest multi is set to the latest value used by the old system, so
         * that multixact.c returns the empty set for multis that might be
         * present on disk.  We set next multi to the value following that;
         * it might end up wrapped around (i.e. 0) if the old cluster had
         * next=MaxMultiXactId, but multixact.c can cope with that just fine.
         */
        exec_prog(UTILITY_LOG_FILE, NULL, true, true,
                  "\"%s/pg_resetwal\" -m %u,%u \"%s\"",
                  new_cluster.bindir,
                  old_cluster.controldata.chkpnt_nxtmulti + 1,
                  old_cluster.controldata.chkpnt_nxtmulti,
                  new_cluster.pgdata);
        check_ok();
    }

    /* now reset the wal archives in the new cluster */
    prep_status("Resetting WAL archives");
    exec_prog(UTILITY_LOG_FILE, NULL, true, true,
    /* use timeline 1 to match controldata and no WAL history file */
              "\"%s/pg_resetwal\" -l 00000001%s \"%s\"", new_cluster.bindir,
              old_cluster.controldata.nextxlogfile + 8,
              new_cluster.pgdata);
    check_ok();
}


/*
 * set_frozenxids()
 *
 * This is called on the new cluster before we restore anything, with
 * minmxid_only = false.  Its purpose is to ensure that all initdb-created
 * vacuumable tables have relfrozenxid/relminmxid matching the old cluster's
 * xid/mxid counters.  We also initialize the datfrozenxid/datminmxid of the
 * built-in databases to match.
 *
 * As we create user tables later, their relfrozenxid/relminmxid fields will
 * be restored properly by the binary-upgrade restore script.  Likewise for
 * user-database datfrozenxid/datminmxid.  However, if we're upgrading from a
 * pre-9.3 database, which does not store per-table or per-DB minmxid, then
 * the relminmxid/datminmxid values filled in by the restore script will just
 * be zeroes.
 *
 * Hence, with a pre-9.3 source database, a second call occurs after
 * everything is restored, with minmxid_only = true.  This pass will
 * initialize all tables and databases, both those made by initdb and user
 * objects, with the desired minmxid value.  frozenxid values are left alone.
 */
static void
set_frozenxids(bool minmxid_only)
{
    int         dbnum;
    PGconn     *conn,
               *conn_template1;
    PGresult   *dbres;
    int         ntups;
    int         i_datname;
    int         i_datallowconn;

    if (!minmxid_only)
        prep_status("Setting frozenxid and minmxid counters in new cluster");
    else
        prep_status("Setting minmxid counter in new cluster");

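    /*
     * template1 is expected to be connectable, so use a session there to
     * update pg_database and to toggle datallowconn for the other databases.
     */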
    conn_template1 = connectToServer(&new_cluster, "template1");

    if (!minmxid_only)
        /* set pg_database.datfrozenxid */
        PQclear(executeQueryOrDie(conn_template1,
                                  "UPDATE pg_catalog.pg_database "
                                  "SET datfrozenxid = '%u'",
                                  old_cluster.controldata.chkpnt_nxtxid));

    /* set pg_database.datminmxid */
    PQclear(executeQueryOrDie(conn_template1,
                              "UPDATE pg_catalog.pg_database "
                              "SET datminmxid = '%u'",
                              old_cluster.controldata.chkpnt_nxtmulti));

    /* get database names */
    dbres = executeQueryOrDie(conn_template1,
                              "SELECT datname, datallowconn "
                              "FROM pg_catalog.pg_database");

    i_datname = PQfnumber(dbres, "datname");
    i_datallowconn = PQfnumber(dbres, "datallowconn");

    ntups = PQntuples(dbres);
    for (dbnum = 0; dbnum < ntups; dbnum++)
    {
        char       *datname = PQgetvalue(dbres, dbnum, i_datname);
        char       *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

        /*
         * We must update databases where datallowconn = false, e.g.
         * template0, because autovacuum increments their datfrozenxids,
         * relfrozenxids, and relminmxid even if autovacuum is turned off,
         * and even though all the data rows are already frozen.  To enable
         * this, we temporarily change datallowconn.
         */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "ALTER DATABASE %s ALLOW_CONNECTIONS = true",
                                      quote_identifier(datname)));

        conn = connectToServer(&new_cluster, datname);

        if (!minmxid_only)
            /* set pg_class.relfrozenxid */
            PQclear(executeQueryOrDie(conn,
                                      "UPDATE pg_catalog.pg_class "
                                      "SET relfrozenxid = '%u' "
            /* only heap, materialized view, and TOAST are vacuumed */
                                      "WHERE relkind IN ("
                                      CppAsString2(RELKIND_RELATION) ", "
                                      CppAsString2(RELKIND_MATVIEW) ", "
                                      CppAsString2(RELKIND_TOASTVALUE) ")",
                                      old_cluster.controldata.chkpnt_nxtxid));

        /* set pg_class.relminmxid */
        PQclear(executeQueryOrDie(conn,
                                  "UPDATE pg_catalog.pg_class "
                                  "SET relminmxid = '%u' "
        /* only heap, materialized view, and TOAST are vacuumed */
                                  "WHERE relkind IN ("
                                  CppAsString2(RELKIND_RELATION) ", "
                                  CppAsString2(RELKIND_MATVIEW) ", "
                                  CppAsString2(RELKIND_TOASTVALUE) ")",
                                  old_cluster.controldata.chkpnt_nxtmulti));
        PQfinish(conn);

        /* Reset datallowconn flag */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
                                      quote_identifier(datname)));
    }

    PQclear(dbres);

    PQfinish(conn_template1);

    check_ok();
}


static void
cleanup(void)
{
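    /* close pg_upgrade's internal log file */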
    fclose(log_opts.internal);

    /* Remove dump and log files? */
    if (!log_opts.retain)
    {
        int         dbnum;
        char      **filename;

        for (filename = output_files; *filename != NULL; filename++)
            unlink(*filename);

        /* remove dump files */
        unlink(GLOBALS_DUMP_FILE);

        if (old_cluster.dbarr.dbs)
            for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
            {
                char        sql_file_name[MAXPGPATH],
                            log_file_name[MAXPGPATH];
                DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

                snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
                unlink(sql_file_name);

                snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
                unlink(log_file_name);
            }
    }
}