In pg_upgrade, dump each database separately and use
--single-transaction to restore each database schema. This yields performance improvements for databases with many tables. Also, remove split_old_dump() as it is no longer needed.
commit 12ee6ec71f
parent bd9c8e741b
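In effect, instead of writing one monolithic pg_dumpall file (pg_upgrade_dump_all.sql) and splitting it afterwards, pg_upgrade now issues commands along the following lines. This is a simplified sketch of the invocations built by exec_prog() in the diff below; connection options, quoting, and --verbose are omitted, and the database name and OID are merely examples:

    pg_dumpall --schema-only --globals-only --binary-upgrade -f pg_upgrade_dump_globals.sql
    pg_dump --schema-only --binary-upgrade --format=custom \
            --file=pg_upgrade_dump_16384.custom mydb    # repeated once per database, file named by OID

Each per-database custom-format dump is then restored into the matching new database with pg_restore --single-transaction in create_new_objects().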
@@ -72,7 +72,7 @@ output_check_banner(bool *live_check)
 
 void
-check_old_cluster(bool live_check, char **sequence_script_file_name)
+check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name)
 {
     /* -- OLD -- */
 
 
@@ -131,10 +131,7 @@ check_old_cluster(bool live_check, char **sequence_script_file_name)
     * the old server is running.
     */
    if (!user_opts.check)
-   {
        generate_old_dump();
-       split_old_dump();
-   }
 
    if (!live_check)
        stop_postmaster(false);
@@ -16,95 +16,34 @@
 void
 generate_old_dump(void)
 {
-   /* run new pg_dumpall binary */
-   prep_status("Creating catalog dump");
+   int         dbnum;
 
    /*
     * --binary-upgrade records the width of dropped columns in pg_class, and
     * restores the frozenid's for databases and relations.
    */
+   prep_status("Creating catalog dump\n");
+
+   pg_log(PG_REPORT, OVERWRITE_MESSAGE, "global objects");
+
+   /* run new pg_dumpall binary for globals */
    exec_prog(UTILITY_LOG_FILE, NULL, true,
-             "\"%s/pg_dumpall\" %s --schema-only --binary-upgrade %s -f %s",
+             "\"%s/pg_dumpall\" %s --schema-only --globals-only --binary-upgrade %s -f %s",
              new_cluster.bindir, cluster_conn_opts(&old_cluster),
              log_opts.verbose ? "--verbose" : "",
-             ALL_DUMP_FILE);
-   check_ok();
-}
-
-
-/*
- * split_old_dump
- *
- * This function splits pg_dumpall output into global values and
- * database creation, and per-db schemas. This allows us to create
- * the support functions between restoring these two parts of the
- * dump. We split on the first "\connect " after a CREATE ROLE
- * username match; this is where the per-db restore starts.
- *
- * We suppress recreation of our own username so we don't generate
- * an error during restore
- */
-void
-split_old_dump(void)
-{
-   FILE       *all_dump,
-              *globals_dump,
-              *db_dump;
-   FILE       *current_output;
-   char        line[LINE_ALLOC];
-   bool        start_of_line = true;
-   char        create_role_str[MAX_STRING];
-   char        create_role_str_quote[MAX_STRING];
-   char        filename[MAXPGPATH];
-   bool        suppressed_username = false;
-
-   /*
-    * Open all files in binary mode to avoid line end translation on Windows,
-    * both for input and output.
-    */
-   snprintf(filename, sizeof(filename), "%s", ALL_DUMP_FILE);
-   if ((all_dump = fopen(filename, PG_BINARY_R)) == NULL)
-       pg_log(PG_FATAL, "Could not open dump file \"%s\": %s\n", filename, getErrorText(errno));
-   snprintf(filename, sizeof(filename), "%s", GLOBALS_DUMP_FILE);
-   if ((globals_dump = fopen_priv(filename, PG_BINARY_W)) == NULL)
-       pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
-   snprintf(filename, sizeof(filename), "%s", DB_DUMP_FILE);
-   if ((db_dump = fopen_priv(filename, PG_BINARY_W)) == NULL)
-       pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
-
-   current_output = globals_dump;
-
-   /* patterns used to prevent our own username from being recreated */
-   snprintf(create_role_str, sizeof(create_role_str),
-            "CREATE ROLE %s;", os_info.user);
-   snprintf(create_role_str_quote, sizeof(create_role_str_quote),
-            "CREATE ROLE %s;", quote_identifier(os_info.user));
-
-   while (fgets(line, sizeof(line), all_dump) != NULL)
+             GLOBALS_DUMP_FILE);
+
+   /* create per-db dump files */
+   for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
-       /* switch to db_dump file output? */
-       if (current_output == globals_dump && start_of_line &&
-           suppressed_username &&
-           strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
-           current_output = db_dump;
+       char        file_name[MAXPGPATH];
+       DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
 
-       /* output unless we are recreating our own username */
-       if (current_output != globals_dump || !start_of_line ||
-           (strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
-            strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
-           fputs(line, current_output);
-       else
-           suppressed_username = true;
+       pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
+       snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
 
-       if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
-           start_of_line = true;
-       else
-           start_of_line = false;
+       exec_prog(RESTORE_LOG_FILE, NULL, true,
+                 "\"%s/pg_dump\" %s --schema-only --binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
+                 new_cluster.bindir, cluster_conn_opts(&old_cluster),
+                 log_opts.verbose ? "--verbose" : "", file_name, old_db->db_name);
    }
 
-   fclose(all_dump);
-   fclose(globals_dump);
-   fclose(db_dump);
+   end_progress_output();
+   check_ok();
 }
@@ -104,8 +104,10 @@ exec_prog(const char *log_file, const char *opt_log_file,
 
    if (result != 0)
    {
-       report_status(PG_REPORT, "*failure*");
+       /* we might be in on a progress status line, so go to the next line */
+       report_status(PG_REPORT, "\n*failure*");
        fflush(stdout);
 
        pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd);
        if (opt_log_file)
            pg_log(throw_error ? PG_FATAL : PG_REPORT,
@@ -92,7 +92,7 @@ main(int argc, char **argv)
 
    check_cluster_compatibility(live_check);
 
-   check_old_cluster(live_check, &sequence_script_file_name);
+   check_and_dump_old_cluster(live_check, &sequence_script_file_name);
 
 
    /* -- NEW -- */
@@ -282,6 +282,11 @@ create_new_objects(void)
 
    prep_status("Adding support functions to new cluster");
 
+   /*
+    * Technically, we only need to install these support functions in new
+    * databases that also exist in the old cluster, but for completeness
+    * we process all new databases.
+    */
    for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
    {
        DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];
@@ -292,11 +297,27 @@ create_new_objects(void)
    }
    check_ok();
 
-   prep_status("Restoring database schema to new cluster");
+   prep_status("Restoring database schema to new cluster\n");
+
+   for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+   {
+       char        file_name[MAXPGPATH];
+       DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+       pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
+       snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+
+       /*
+        * Using pg_restore --single-transaction is faster than other
+        * methods, like --jobs.  pg_dump only produces its output at the
+        * end, so there is little parallelism using the pipe.
+        */
    exec_prog(RESTORE_LOG_FILE, NULL, true,
-             "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
+             "\"%s/pg_restore\" %s --exit-on-error --single-transaction --verbose --dbname \"%s\" \"%s\"",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
-             DB_DUMP_FILE);
+             old_db->db_name, file_name);
+   }
+   end_progress_output();
    check_ok();
 
    /* regenerate now that we have objects in the databases */
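For illustration, the loop above ends up invoking pg_restore once per database, roughly like this (hypothetical database name and OID, connection options omitted):

    pg_restore --exit-on-error --single-transaction --verbose \
            --dbname mydb pg_upgrade_dump_16384.custom

Compared with the previous psql -f restore, which committed each statement separately, --single-transaction runs the whole per-database schema restore as one transaction, which avoids a commit per object and is presumably where the gain for databases with many tables comes from.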
@@ -455,14 +476,23 @@ cleanup(void)
    /* Remove dump and log files? */
    if (!log_opts.retain)
    {
+       int         dbnum;
        char      **filename;
 
        for (filename = output_files; *filename != NULL; filename++)
            unlink(*filename);
 
-       /* remove SQL files */
-       unlink(ALL_DUMP_FILE);
+       /* remove dump files */
        unlink(GLOBALS_DUMP_FILE);
-       unlink(DB_DUMP_FILE);
+
+       if (old_cluster.dbarr.dbs)
+           for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+           {
+               char        file_name[MAXPGPATH];
+               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+               snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+               unlink(file_name);
+           }
    }
 }
@@ -29,10 +29,9 @@
 #define OVERWRITE_MESSAGE   " %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
 #define GET_MAJOR_VERSION(v)    ((v) / 100)
 
-#define ALL_DUMP_FILE       "pg_upgrade_dump_all.sql"
 /* contains both global db information and CREATE DATABASE commands */
 #define GLOBALS_DUMP_FILE   "pg_upgrade_dump_globals.sql"
-#define DB_DUMP_FILE        "pg_upgrade_dump_db.sql"
+#define DB_DUMP_FILE_MASK   "pg_upgrade_dump_%u.custom"
 
 #define SERVER_LOG_FILE     "pg_upgrade_server.log"
 #define RESTORE_LOG_FILE    "pg_upgrade_restore.log"
@@ -296,7 +295,7 @@ extern OSInfo os_info;
 /* check.c */
 
 void        output_check_banner(bool *live_check);
-void        check_old_cluster(bool live_check,
+void        check_and_dump_old_cluster(bool live_check,
                    char **sequence_script_file_name);
 void        check_new_cluster(void);
 void        report_clusters_compatible(void);
@@ -319,7 +318,6 @@ void disable_old_cluster(void);
 /* dump.c */
 
 void        generate_old_dump(void);
-void        split_old_dump(void);
 
 
 /* exec.c */
@@ -433,6 +431,7 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
 void
 pg_log(eLogType type, char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void        end_progress_output(void);
 void
 prep_status(const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
@@ -82,9 +82,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
        }
    }
 
-   prep_status(" ");           /* in case nothing printed; pass a space so
-                                * gcc doesn't complain about empty format
-                                * string */
+   end_progress_output();
    check_ok();
 
    return msg;
@@ -35,6 +35,18 @@ report_status(eLogType type, const char *fmt,...)
 }
 
 
+/* force blank output for progress display */
+void
+end_progress_output(void)
+{
+   /*
+    * In case nothing printed; pass a space so gcc doesn't complain about
+    * empty format string.
+    */
+   prep_status(" ");
+}
+
+
 /*
  * prep_status
  *
@@ -502,7 +502,7 @@ main(int argc, char *argv[])
    }
 
    /* Dump CREATE DATABASE commands */
-   if (!globals_only && !roles_only && !tablespaces_only)
+   if (binary_upgrade || (!globals_only && !roles_only && !tablespaces_only))
        dumpCreateDB(conn);
 
    /* Dump role/database settings */
@@ -745,8 +745,10 @@ dumpRoles(PGconn *conn)
     * will acquire the right properties even if it already exists (ie, it
     * won't hurt for the CREATE to fail). This is particularly important
     * for the role we are connected as, since even with --clean we will
-    * have failed to drop it.
+    * have failed to drop it. binary_upgrade cannot generate any errors,
+    * so we assume the role is already created.
     */
+   if (!binary_upgrade)
    appendPQExpBuffer(buf, "CREATE ROLE %s;\n", fmtId(rolename));
    appendPQExpBuffer(buf, "ALTER ROLE %s WITH", fmtId(rolename));