Restructure the pg_upgrade code to use several global structures rather
than packing everything into 'ctx' and passing that to every function.
Bruce Momjian 2010-10-19 21:38:16 +00:00
parent 6e74a91b2b
commit e13f7e9a71
17 changed files with 972 additions and 972 deletions
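
In brief, the refactoring replaces the single migratorContext parameter with file-scope globals. A minimal before/after sketch, assembled from the hunks below (the globals old_cluster, new_cluster, user_opts, os_info, and log are assumed to be declared in pg_upgrade.h, which is not part of this excerpt):

/* Before: the context struct is threaded through every call */
void
report_clusters_compatible(migratorContext *ctx)
{
    if (ctx->check)
        pg_log(ctx, PG_REPORT, "\n*Clusters are compatible*\n");
}

/* After: the same state is reached through the global structures */
void
report_clusters_compatible(void)
{
    if (user_opts.check)
        pg_log(PG_REPORT, "\n*Clusters are compatible*\n");
}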


@ -10,73 +10,73 @@
#include "pg_upgrade.h"
static void set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster);
static void check_new_db_is_empty(migratorContext *ctx);
static void check_locale_and_encoding(migratorContext *ctx, ControlData *oldctrl,
static void set_locale_and_encoding(Cluster whichCluster);
static void check_new_db_is_empty(void);
static void check_locale_and_encoding(ControlData *oldctrl,
ControlData *newctrl);
static void check_for_isn_and_int8_passing_mismatch(migratorContext *ctx,
static void check_for_isn_and_int8_passing_mismatch(
Cluster whichCluster);
static void check_for_reg_data_type_usage(migratorContext *ctx, Cluster whichCluster);
static void check_for_reg_data_type_usage(Cluster whichCluster);
void
output_check_banner(migratorContext *ctx, bool *live_check)
output_check_banner(bool *live_check)
{
if (ctx->check && is_server_running(ctx, ctx->old.pgdata))
if (user_opts.check && is_server_running(old_cluster.pgdata))
{
*live_check = true;
if (ctx->old.port == ctx->new.port)
pg_log(ctx, PG_FATAL, "When checking a live server, "
if (old_cluster.port == new_cluster.port)
pg_log(PG_FATAL, "When checking a live server, "
"the old and new port numbers must be different.\n");
pg_log(ctx, PG_REPORT, "PerForming Consistency Checks on Old Live Server\n");
pg_log(ctx, PG_REPORT, "------------------------------------------------\n");
pg_log(PG_REPORT, "PerForming Consistency Checks on Old Live Server\n");
pg_log(PG_REPORT, "------------------------------------------------\n");
}
else
{
pg_log(ctx, PG_REPORT, "Performing Consistency Checks\n");
pg_log(ctx, PG_REPORT, "-----------------------------\n");
pg_log(PG_REPORT, "Performing Consistency Checks\n");
pg_log(PG_REPORT, "-----------------------------\n");
}
}
void
check_old_cluster(migratorContext *ctx, bool live_check,
check_old_cluster(bool live_check,
char **sequence_script_file_name)
{
/* -- OLD -- */
if (!live_check)
start_postmaster(ctx, CLUSTER_OLD, false);
start_postmaster(CLUSTER_OLD, false);
set_locale_and_encoding(ctx, CLUSTER_OLD);
set_locale_and_encoding(CLUSTER_OLD);
get_pg_database_relfilenode(ctx, CLUSTER_OLD);
get_pg_database_relfilenode(CLUSTER_OLD);
/* Extract a list of databases and tables from the old cluster */
get_db_and_rel_infos(ctx, &ctx->old.dbarr, CLUSTER_OLD);
get_db_and_rel_infos(&old_cluster.dbarr, CLUSTER_OLD);
init_tablespaces(ctx);
init_tablespaces();
get_loadable_libraries(ctx);
get_loadable_libraries();
/*
* Check for various failure cases
*/
check_for_reg_data_type_usage(ctx, CLUSTER_OLD);
check_for_isn_and_int8_passing_mismatch(ctx, CLUSTER_OLD);
check_for_reg_data_type_usage(CLUSTER_OLD);
check_for_isn_and_int8_passing_mismatch(CLUSTER_OLD);
/* old = PG 8.3 checks? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 803)
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803)
{
old_8_3_check_for_name_data_type_usage(ctx, CLUSTER_OLD);
old_8_3_check_for_tsquery_usage(ctx, CLUSTER_OLD);
if (ctx->check)
old_8_3_check_for_name_data_type_usage(CLUSTER_OLD);
old_8_3_check_for_tsquery_usage(CLUSTER_OLD);
if (user_opts.check)
{
old_8_3_rebuild_tsvector_tables(ctx, true, CLUSTER_OLD);
old_8_3_invalidate_hash_gin_indexes(ctx, true, CLUSTER_OLD);
old_8_3_invalidate_bpchar_pattern_ops_indexes(ctx, true, CLUSTER_OLD);
old_8_3_rebuild_tsvector_tables(true, CLUSTER_OLD);
old_8_3_invalidate_hash_gin_indexes(true, CLUSTER_OLD);
old_8_3_invalidate_bpchar_pattern_ops_indexes(true, CLUSTER_OLD);
}
else
@ -86,120 +86,120 @@ check_old_cluster(migratorContext *ctx, bool live_check,
* end.
*/
*sequence_script_file_name =
old_8_3_create_sequence_script(ctx, CLUSTER_OLD);
old_8_3_create_sequence_script(CLUSTER_OLD);
}
/* Pre-PG 9.0 had no large object permissions */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 804)
new_9_0_populate_pg_largeobject_metadata(ctx, true, CLUSTER_OLD);
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
new_9_0_populate_pg_largeobject_metadata(true, CLUSTER_OLD);
/*
* While not a check option, we do this now because this is the only time
* the old server is running.
*/
if (!ctx->check)
if (!user_opts.check)
{
generate_old_dump(ctx);
split_old_dump(ctx);
generate_old_dump();
split_old_dump();
}
if (!live_check)
stop_postmaster(ctx, false, false);
stop_postmaster(false, false);
}
void
check_new_cluster(migratorContext *ctx)
check_new_cluster(void)
{
set_locale_and_encoding(ctx, CLUSTER_NEW);
set_locale_and_encoding(CLUSTER_NEW);
check_new_db_is_empty(ctx);
check_new_db_is_empty();
check_loadable_libraries(ctx);
check_loadable_libraries();
check_locale_and_encoding(ctx, &ctx->old.controldata, &ctx->new.controldata);
check_locale_and_encoding(&old_cluster.controldata, &new_cluster.controldata);
if (ctx->transfer_mode == TRANSFER_MODE_LINK)
check_hard_link(ctx);
if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
check_hard_link();
}
void
report_clusters_compatible(migratorContext *ctx)
report_clusters_compatible(void)
{
if (ctx->check)
if (user_opts.check)
{
pg_log(ctx, PG_REPORT, "\n*Clusters are compatible*\n");
pg_log(PG_REPORT, "\n*Clusters are compatible*\n");
/* stops new cluster */
stop_postmaster(ctx, false, false);
exit_nicely(ctx, false);
stop_postmaster(false, false);
exit_nicely(false);
}
pg_log(ctx, PG_REPORT, "\n"
pg_log(PG_REPORT, "\n"
"| If pg_upgrade fails after this point, you must\n"
"| re-initdb the new cluster before continuing.\n"
"| You will also need to remove the \".old\" suffix\n"
"| from %s/global/pg_control.old.\n", ctx->old.pgdata);
"| from %s/global/pg_control.old.\n", old_cluster.pgdata);
}
void
issue_warnings(migratorContext *ctx, char *sequence_script_file_name)
issue_warnings(char *sequence_script_file_name)
{
/* old = PG 8.3 warnings? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 803)
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803)
{
start_postmaster(ctx, CLUSTER_NEW, true);
start_postmaster(CLUSTER_NEW, true);
/* restore proper sequence values using file created from old server */
if (sequence_script_file_name)
{
prep_status(ctx, "Adjusting sequences");
exec_prog(ctx, true,
prep_status("Adjusting sequences");
exec_prog(true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
"--no-psqlrc --port %d --username \"%s\" "
"-f \"%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user,
sequence_script_file_name, ctx->logfile);
new_cluster.bindir, new_cluster.port, os_info.user,
sequence_script_file_name, log.filename);
unlink(sequence_script_file_name);
check_ok(ctx);
check_ok();
}
old_8_3_rebuild_tsvector_tables(ctx, false, CLUSTER_NEW);
old_8_3_invalidate_hash_gin_indexes(ctx, false, CLUSTER_NEW);
old_8_3_invalidate_bpchar_pattern_ops_indexes(ctx, false, CLUSTER_NEW);
stop_postmaster(ctx, false, true);
old_8_3_rebuild_tsvector_tables(false, CLUSTER_NEW);
old_8_3_invalidate_hash_gin_indexes(false, CLUSTER_NEW);
old_8_3_invalidate_bpchar_pattern_ops_indexes(false, CLUSTER_NEW);
stop_postmaster(false, true);
}
/* Create dummy large object permissions for old < PG 9.0? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 804)
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
{
start_postmaster(ctx, CLUSTER_NEW, true);
new_9_0_populate_pg_largeobject_metadata(ctx, false, CLUSTER_NEW);
stop_postmaster(ctx, false, true);
start_postmaster(CLUSTER_NEW, true);
new_9_0_populate_pg_largeobject_metadata(false, CLUSTER_NEW);
stop_postmaster(false, true);
}
}
void
output_completion_banner(migratorContext *ctx, char *deletion_script_file_name)
output_completion_banner(char *deletion_script_file_name)
{
/* Did we migrate the free space files? */
if (GET_MAJOR_VERSION(ctx->old.major_version) >= 804)
pg_log(ctx, PG_REPORT,
if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
pg_log(PG_REPORT,
"| Optimizer statistics is not transferred by pg_upgrade\n"
"| so consider running:\n"
"| \tvacuumdb --all --analyze-only\n"
"| on the newly-upgraded cluster.\n\n");
else
pg_log(ctx, PG_REPORT,
pg_log(PG_REPORT,
"| Optimizer statistics and free space information\n"
"| are not transferred by pg_upgrade so consider\n"
"| running:\n"
"| \tvacuumdb --all --analyze\n"
"| on the newly-upgraded cluster.\n\n");
pg_log(ctx, PG_REPORT,
pg_log(PG_REPORT,
"| Running this script will delete the old cluster's data files:\n"
"| \t%s\n",
deletion_script_file_name);
@ -207,33 +207,33 @@ output_completion_banner(migratorContext *ctx, char *deletion_script_file_name)
void
check_cluster_versions(migratorContext *ctx)
check_cluster_versions(void)
{
/* get old and new cluster versions */
ctx->old.major_version = get_major_server_version(ctx, &ctx->old.major_version_str, CLUSTER_OLD);
ctx->new.major_version = get_major_server_version(ctx, &ctx->new.major_version_str, CLUSTER_NEW);
old_cluster.major_version = get_major_server_version(&old_cluster.major_version_str, CLUSTER_OLD);
new_cluster.major_version = get_major_server_version(&new_cluster.major_version_str, CLUSTER_NEW);
/* We allow migration from/to the same major version for beta upgrades */
if (GET_MAJOR_VERSION(ctx->old.major_version) < 803)
pg_log(ctx, PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
if (GET_MAJOR_VERSION(old_cluster.major_version) < 803)
pg_log(PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(ctx->new.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_log(ctx, PG_FATAL, "This utility can only upgrade to PostgreSQL version %s.\n",
if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_log(PG_FATAL, "This utility can only upgrade to PostgreSQL version %s.\n",
PG_MAJORVERSION);
/*
* We can't allow downgrading because we use the target pg_dumpall, and
* pg_dumpall cannot operate on new datbase versions, only older versions.
*/
if (ctx->old.major_version > ctx->new.major_version)
pg_log(ctx, PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
if (old_cluster.major_version > new_cluster.major_version)
pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
}
void
check_cluster_compatibility(migratorContext *ctx, bool live_check)
check_cluster_compatibility(bool live_check)
{
char libfile[MAXPGPATH];
FILE *lib_test;
@ -242,24 +242,24 @@ check_cluster_compatibility(migratorContext *ctx, bool live_check)
* Test pg_upgrade_support.so is in the proper place. We cannot copy it
* ourselves because install directories are typically root-owned.
*/
snprintf(libfile, sizeof(libfile), "%s/pg_upgrade_support%s", ctx->new.libpath,
snprintf(libfile, sizeof(libfile), "%s/pg_upgrade_support%s", new_cluster.libpath,
DLSUFFIX);
if ((lib_test = fopen(libfile, "r")) == NULL)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"\npg_upgrade_support%s must be created and installed in %s\n", DLSUFFIX, libfile);
else
fclose(lib_test);
/* get/check pg_control data of servers */
get_control_data(ctx, &ctx->old, live_check);
get_control_data(ctx, &ctx->new, false);
check_control_data(ctx, &ctx->old.controldata, &ctx->new.controldata);
get_control_data(&old_cluster, live_check);
get_control_data(&new_cluster, false);
check_control_data(&old_cluster.controldata, &new_cluster.controldata);
/* Is it 9.0 but without tablespace directories? */
if (GET_MAJOR_VERSION(ctx->new.major_version) == 900 &&
ctx->new.controldata.cat_ver < TABLE_SPACE_SUBDIRS)
pg_log(ctx, PG_FATAL, "This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 &&
new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS)
pg_log(PG_FATAL, "This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
"because of backend API changes made during development.\n");
}
@ -270,17 +270,16 @@ check_cluster_compatibility(migratorContext *ctx, bool live_check)
* query the database to get the template0 locale
*/
static void
set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster)
set_locale_and_encoding(Cluster whichCluster)
{
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
ControlData *ctrl = &active_cluster->controldata;
PGconn *conn;
PGresult *res;
int i_encoding;
ControlData *ctrl = (whichCluster == CLUSTER_OLD) ?
&ctx->old.controldata : &ctx->new.controldata;
int cluster_version = (whichCluster == CLUSTER_OLD) ?
ctx->old.major_version : ctx->new.major_version;
int cluster_version = active_cluster->major_version;
conn = connectToServer(ctx, "template1", whichCluster);
conn = connectToServer("template1", whichCluster);
/* for pg < 80400, we got the values from pg_controldata */
if (cluster_version >= 80400)
@ -288,7 +287,7 @@ set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster)
int i_datcollate;
int i_datctype;
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT datcollate, datctype "
"FROM pg_catalog.pg_database "
"WHERE datname = 'template0' ");
@ -297,20 +296,20 @@ set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster)
i_datcollate = PQfnumber(res, "datcollate");
i_datctype = PQfnumber(res, "datctype");
ctrl->lc_collate = pg_strdup(ctx, PQgetvalue(res, 0, i_datcollate));
ctrl->lc_ctype = pg_strdup(ctx, PQgetvalue(res, 0, i_datctype));
ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
PQclear(res);
}
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT pg_catalog.pg_encoding_to_char(encoding) "
"FROM pg_catalog.pg_database "
"WHERE datname = 'template0' ");
assert(PQntuples(res) == 1);
i_encoding = PQfnumber(res, "pg_encoding_to_char");
ctrl->encoding = pg_strdup(ctx, PQgetvalue(res, 0, i_encoding));
ctrl->encoding = pg_strdup(PQgetvalue(res, 0, i_encoding));
PQclear(res);
@ -325,33 +324,33 @@ set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster)
* we probably had to get via a database query.
*/
static void
check_locale_and_encoding(migratorContext *ctx, ControlData *oldctrl,
check_locale_and_encoding(ControlData *oldctrl,
ControlData *newctrl)
{
if (strcmp(oldctrl->lc_collate, newctrl->lc_collate) != 0)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new cluster lc_collate values do not match\n");
if (strcmp(oldctrl->lc_ctype, newctrl->lc_ctype) != 0)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new cluster lc_ctype values do not match\n");
if (strcmp(oldctrl->encoding, newctrl->encoding) != 0)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new cluster encoding values do not match\n");
}
static void
check_new_db_is_empty(migratorContext *ctx)
check_new_db_is_empty(void)
{
int dbnum;
bool found = false;
get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW);
get_db_and_rel_infos(&new_cluster.dbarr, CLUSTER_NEW);
for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
int relnum;
RelInfoArr *rel_arr = &ctx->new.dbarr.dbs[dbnum].rel_arr;
RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr;
for (relnum = 0; relnum < rel_arr->nrels;
relnum++)
@ -365,10 +364,10 @@ check_new_db_is_empty(migratorContext *ctx)
}
}
dbarr_free(&ctx->new.dbarr);
dbarr_free(&new_cluster.dbarr);
if (found)
pg_log(ctx, PG_FATAL, "New cluster is not empty; exiting\n");
pg_log(PG_FATAL, "New cluster is not empty; exiting\n");
}
@ -378,21 +377,21 @@ check_new_db_is_empty(migratorContext *ctx)
* This is particularly useful for tablespace deletion.
*/
void
create_script_for_old_cluster_deletion(migratorContext *ctx,
create_script_for_old_cluster_deletion(
char **deletion_script_file_name)
{
FILE *script = NULL;
int tblnum;
*deletion_script_file_name = pg_malloc(ctx, MAXPGPATH);
*deletion_script_file_name = pg_malloc(MAXPGPATH);
prep_status(ctx, "Creating script to delete old cluster");
prep_status("Creating script to delete old cluster");
snprintf(*deletion_script_file_name, MAXPGPATH, "%s/delete_old_cluster.%s",
ctx->cwd, SCRIPT_EXT);
os_info.cwd, SCRIPT_EXT);
if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
pg_log(PG_FATAL, "Could not create necessary file: %s\n",
*deletion_script_file_name);
#ifndef WIN32
@ -401,31 +400,31 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
#endif
/* delete old cluster's default tablespace */
fprintf(script, RMDIR_CMD " %s\n", ctx->old.pgdata);
fprintf(script, RMDIR_CMD " %s\n", old_cluster.pgdata);
/* delete old cluster's alternate tablespaces */
for (tblnum = 0; tblnum < ctx->num_tablespaces; tblnum++)
for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
{
/*
* Do the old cluster's per-database directories share a directory
* with a new version-specific tablespace?
*/
if (strlen(ctx->old.tablespace_suffix) == 0)
if (strlen(old_cluster.tablespace_suffix) == 0)
{
/* delete per-database directories */
int dbnum;
fprintf(script, "\n");
/* remove PG_VERSION? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 804)
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
fprintf(script, RM_CMD " %s%s/PG_VERSION\n",
ctx->tablespaces[tblnum], ctx->old.tablespace_suffix);
os_info.tablespaces[tblnum], old_cluster.tablespace_suffix);
for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
fprintf(script, RMDIR_CMD " %s%s/%d\n",
ctx->tablespaces[tblnum], ctx->old.tablespace_suffix,
ctx->old.dbarr.dbs[dbnum].db_oid);
os_info.tablespaces[tblnum], old_cluster.tablespace_suffix,
old_cluster.dbarr.dbs[dbnum].db_oid);
}
}
else
@ -435,18 +434,18 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
* or a version-specific subdirectory.
*/
fprintf(script, RMDIR_CMD " %s%s\n",
ctx->tablespaces[tblnum], ctx->old.tablespace_suffix);
os_info.tablespaces[tblnum], old_cluster.tablespace_suffix);
}
fclose(script);
#ifndef WIN32
if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
pg_log(ctx, PG_FATAL, "Could not add execute permission to file: %s\n",
pg_log(PG_FATAL, "Could not add execute permission to file: %s\n",
*deletion_script_file_name);
#endif
check_ok(ctx);
check_ok();
}
@ -458,27 +457,26 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
* it must match for the old and new servers.
*/
void
check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichCluster)
check_for_isn_and_int8_passing_mismatch(Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for /contrib/isn with bigint-passing mismatch");
prep_status("Checking for /contrib/isn with bigint-passing mismatch");
if (ctx->old.controldata.float8_pass_by_value ==
ctx->new.controldata.float8_pass_by_value)
if (old_cluster.controldata.float8_pass_by_value ==
new_cluster.controldata.float8_pass_by_value)
{
/* no mismatch */
check_ok(ctx);
check_ok();
return;
}
snprintf(output_path, sizeof(output_path), "%s/contrib_isn_and_int8_pass_by_value.txt",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -489,10 +487,10 @@ check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichClust
int i_nspname,
i_proname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* Find any functions coming from contrib/isn */
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, p.proname "
"FROM pg_catalog.pg_proc p, "
" pg_catalog.pg_namespace n "
@ -506,7 +504,7 @@ check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichClust
{
found = true;
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@ -525,8 +523,8 @@ check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichClust
if (found)
{
fclose(script);
pg_log(ctx, PG_REPORT, "fatal\n");
pg_log(ctx, PG_FATAL,
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"| Your installation contains \"/contrib/isn\" functions\n"
"| which rely on the bigint data type. Your old and\n"
"| new clusters pass bigint values differently so this\n"
@ -538,7 +536,7 @@ check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichClust
"| \t%s\n\n", output_path);
}
else
check_ok(ctx);
check_ok();
}
@ -554,19 +552,18 @@ check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichClust
* tables upgraded by pg_upgrade.
*/
void
check_for_reg_data_type_usage(migratorContext *ctx, Cluster whichCluster)
check_for_reg_data_type_usage(Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for reg* system oid user data types");
prep_status("Checking for reg* system oid user data types");
snprintf(output_path, sizeof(output_path), "%s/tables_using_reg.txt",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -578,9 +575,9 @@ check_for_reg_data_type_usage(migratorContext *ctx, Cluster whichCluster)
i_relname,
i_attname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
@ -608,7 +605,7 @@ check_for_reg_data_type_usage(migratorContext *ctx, Cluster whichCluster)
{
found = true;
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@ -628,8 +625,8 @@ check_for_reg_data_type_usage(migratorContext *ctx, Cluster whichCluster)
if (found)
{
fclose(script);
pg_log(ctx, PG_REPORT, "fatal\n");
pg_log(ctx, PG_FATAL,
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"| Your installation contains one of the reg* data types in\n"
"| user tables. These data types reference system oids that\n"
"| are not preserved by pg_upgrade, so this cluster cannot\n"
@ -639,5 +636,5 @@ check_for_reg_data_type_usage(migratorContext *ctx, Cluster whichCluster)
"| \t%s\n\n", output_path);
}
else
check_ok(ctx);
check_ok();
}


@ -11,7 +11,7 @@
#include <ctype.h>
static void putenv2(migratorContext *ctx, const char *var, const char *val);
static void putenv2(const char *var, const char *val);
/*
* get_control_data()
@ -31,7 +31,7 @@ static void putenv2(migratorContext *ctx, const char *var, const char *val);
* return valid xid data for a running server.
*/
void
get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
get_control_data(ClusterInfo *cluster, bool live_check)
{
char cmd[MAXPGPATH];
char bufin[MAX_STRING];
@ -67,39 +67,39 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
* English. Copied from pg_regress.c.
*/
if (getenv("LC_COLLATE"))
lc_collate = pg_strdup(ctx, getenv("LC_COLLATE"));
lc_collate = pg_strdup(getenv("LC_COLLATE"));
if (getenv("LC_CTYPE"))
lc_ctype = pg_strdup(ctx, getenv("LC_CTYPE"));
lc_ctype = pg_strdup(getenv("LC_CTYPE"));
if (getenv("LC_MONETARY"))
lc_monetary = pg_strdup(ctx, getenv("LC_MONETARY"));
lc_monetary = pg_strdup(getenv("LC_MONETARY"));
if (getenv("LC_NUMERIC"))
lc_numeric = pg_strdup(ctx, getenv("LC_NUMERIC"));
lc_numeric = pg_strdup(getenv("LC_NUMERIC"));
if (getenv("LC_TIME"))
lc_time = pg_strdup(ctx, getenv("LC_TIME"));
lc_time = pg_strdup(getenv("LC_TIME"));
if (getenv("LANG"))
lang = pg_strdup(ctx, getenv("LANG"));
lang = pg_strdup(getenv("LANG"));
if (getenv("LANGUAGE"))
language = pg_strdup(ctx, getenv("LANGUAGE"));
language = pg_strdup(getenv("LANGUAGE"));
if (getenv("LC_ALL"))
lc_all = pg_strdup(ctx, getenv("LC_ALL"));
lc_all = pg_strdup(getenv("LC_ALL"));
if (getenv("LC_MESSAGES"))
lc_messages = pg_strdup(ctx, getenv("LC_MESSAGES"));
lc_messages = pg_strdup(getenv("LC_MESSAGES"));
putenv2(ctx, "LC_COLLATE", NULL);
putenv2(ctx, "LC_CTYPE", NULL);
putenv2(ctx, "LC_MONETARY", NULL);
putenv2(ctx, "LC_NUMERIC", NULL);
putenv2(ctx, "LC_TIME", NULL);
putenv2(ctx, "LANG",
putenv2("LC_COLLATE", NULL);
putenv2("LC_CTYPE", NULL);
putenv2("LC_MONETARY", NULL);
putenv2("LC_NUMERIC", NULL);
putenv2("LC_TIME", NULL);
putenv2("LANG",
#ifndef WIN32
NULL);
#else
/* On Windows the default locale cannot be English, so force it */
"en");
#endif
putenv2(ctx, "LANGUAGE", NULL);
putenv2(ctx, "LC_ALL", NULL);
putenv2(ctx, "LC_MESSAGES", "C");
putenv2("LANGUAGE", NULL);
putenv2("LC_ALL", NULL);
putenv2("LC_MESSAGES", "C");
snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/%s \"%s\"" SYSTEMQUOTE,
cluster->bindir,
@ -109,7 +109,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
fflush(stderr);
if ((output = popen(cmd, "r")) == NULL)
pg_log(ctx, PG_FATAL, "Could not get control data: %s\n",
pg_log(PG_FATAL, "Could not get control data: %s\n",
getErrorText(errno));
/* Only pre-8.4 has these so if they are not set below we will check later */
@ -126,8 +126,8 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
/* we have the result of cmd in "output". so parse it line by line now */
while (fgets(bufin, sizeof(bufin), output))
{
if (ctx->debug)
fputs(bufin, ctx->debug_fd);
if (log.debug)
fputs(bufin, log.debug_fd);
#ifdef WIN32
@ -140,7 +140,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
{
for (p = bufin; *p; p++)
if (!isascii(*p))
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
"with LANG=C. You must upgrade this cluster to a newer version of Postgres\n"
"8.3 to fix this bug. Postgres 8.3.7 and later are known to work properly.\n");
@ -152,7 +152,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: pg_resetxlog problem\n", __LINE__);
pg_log(PG_FATAL, "%d: pg_resetxlog problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.ctrl_ver = str2uint(p);
@ -162,7 +162,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.cat_ver = str2uint(p);
@ -172,7 +172,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.logid = str2uint(p);
@ -183,7 +183,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.nxtlogseg = str2uint(p);
@ -194,7 +194,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.chkpnt_tli = str2uint(p);
@ -208,7 +208,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
op = strchr(p, ':');
if (op == NULL || strlen(op) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
op++; /* removing ':' char */
cluster->controldata.chkpnt_nxtxid = str2uint(op);
@ -219,7 +219,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.chkpnt_nxtoid = str2uint(p);
@ -230,7 +230,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.align = str2uint(p);
@ -241,7 +241,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.blocksz = str2uint(p);
@ -252,7 +252,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.largesz = str2uint(p);
@ -263,7 +263,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.walsz = str2uint(p);
@ -274,7 +274,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.walseg = str2uint(p);
@ -285,7 +285,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.ident = str2uint(p);
@ -296,7 +296,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.index = str2uint(p);
@ -307,7 +307,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.toast = str2uint(p);
@ -318,7 +318,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.date_is_int = strstr(p, "64-bit integers") != NULL;
@ -329,7 +329,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
/* used later for /contrib check */
@ -342,14 +342,14 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
/* skip leading spaces and remove trailing newline */
p += strspn(p, " ");
if (strlen(p) > 0 && *(p + strlen(p) - 1) == '\n')
*(p + strlen(p) - 1) = '\0';
cluster->controldata.lc_collate = pg_strdup(ctx, p);
cluster->controldata.lc_collate = pg_strdup(p);
}
/* In pre-8.4 only */
else if ((p = strstr(bufin, "LC_CTYPE:")) != NULL)
@ -357,14 +357,14 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
/* skip leading spaces and remove trailing newline */
p += strspn(p, " ");
if (strlen(p) > 0 && *(p + strlen(p) - 1) == '\n')
*(p + strlen(p) - 1) = '\0';
cluster->controldata.lc_ctype = pg_strdup(ctx, p);
cluster->controldata.lc_ctype = pg_strdup(p);
}
}
@ -374,15 +374,15 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
/*
* Restore environment variables
*/
putenv2(ctx, "LC_COLLATE", lc_collate);
putenv2(ctx, "LC_CTYPE", lc_ctype);
putenv2(ctx, "LC_MONETARY", lc_monetary);
putenv2(ctx, "LC_NUMERIC", lc_numeric);
putenv2(ctx, "LC_TIME", lc_time);
putenv2(ctx, "LANG", lang);
putenv2(ctx, "LANGUAGE", language);
putenv2(ctx, "LC_ALL", lc_all);
putenv2(ctx, "LC_MESSAGES", lc_messages);
putenv2("LC_COLLATE", lc_collate);
putenv2("LC_CTYPE", lc_ctype);
putenv2("LC_MONETARY", lc_monetary);
putenv2("LC_NUMERIC", lc_numeric);
putenv2("LC_TIME", lc_time);
putenv2("LANG", lang);
putenv2("LANGUAGE", language);
putenv2("LC_ALL", lc_all);
putenv2("LC_MESSAGES", lc_messages);
pg_free(lc_collate);
pg_free(lc_ctype);
@ -403,56 +403,56 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
!got_walseg || !got_ident || !got_index || !got_toast ||
!got_date_is_int || !got_float8_pass_by_value)
{
pg_log(ctx, PG_REPORT,
pg_log(PG_REPORT,
"Some required control information is missing; cannot find:\n");
if (!got_xid)
pg_log(ctx, PG_REPORT, " checkpoint next XID\n");
pg_log(PG_REPORT, " checkpoint next XID\n");
if (!got_oid)
pg_log(ctx, PG_REPORT, " latest checkpoint next OID\n");
pg_log(PG_REPORT, " latest checkpoint next OID\n");
if (!live_check && !got_log_id)
pg_log(ctx, PG_REPORT, " first log file ID after reset\n");
pg_log(PG_REPORT, " first log file ID after reset\n");
if (!live_check && !got_log_seg)
pg_log(ctx, PG_REPORT, " first log file segment after reset\n");
pg_log(PG_REPORT, " first log file segment after reset\n");
if (!got_tli)
pg_log(ctx, PG_REPORT, " latest checkpoint timeline ID\n");
pg_log(PG_REPORT, " latest checkpoint timeline ID\n");
if (!got_align)
pg_log(ctx, PG_REPORT, " maximum alignment\n");
pg_log(PG_REPORT, " maximum alignment\n");
if (!got_blocksz)
pg_log(ctx, PG_REPORT, " block size\n");
pg_log(PG_REPORT, " block size\n");
if (!got_largesz)
pg_log(ctx, PG_REPORT, " large relation segment size\n");
pg_log(PG_REPORT, " large relation segment size\n");
if (!got_walsz)
pg_log(ctx, PG_REPORT, " WAL block size\n");
pg_log(PG_REPORT, " WAL block size\n");
if (!got_walseg)
pg_log(ctx, PG_REPORT, " WAL segment size\n");
pg_log(PG_REPORT, " WAL segment size\n");
if (!got_ident)
pg_log(ctx, PG_REPORT, " maximum identifier length\n");
pg_log(PG_REPORT, " maximum identifier length\n");
if (!got_index)
pg_log(ctx, PG_REPORT, " maximum number of indexed columns\n");
pg_log(PG_REPORT, " maximum number of indexed columns\n");
if (!got_toast)
pg_log(ctx, PG_REPORT, " maximum TOAST chunk size\n");
pg_log(PG_REPORT, " maximum TOAST chunk size\n");
if (!got_date_is_int)
pg_log(ctx, PG_REPORT, " dates/times are integers?\n");
pg_log(PG_REPORT, " dates/times are integers?\n");
/* value added in Postgres 8.4 */
if (!got_float8_pass_by_value)
pg_log(ctx, PG_REPORT, " float8 argument passing method\n");
pg_log(PG_REPORT, " float8 argument passing method\n");
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"Unable to continue without required control information, terminating\n");
}
}
@ -464,51 +464,51 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
* check to make sure the control data settings are compatible
*/
void
check_control_data(migratorContext *ctx, ControlData *oldctrl,
check_control_data(ControlData *oldctrl,
ControlData *newctrl)
{
if (oldctrl->align == 0 || oldctrl->align != newctrl->align)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata alignments are invalid or do not match\n");
if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata block sizes are invalid or do not match\n");
if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata maximum relation segement sizes are invalid or do not match\n");
if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata WAL block sizes are invalid or do not match\n");
if (oldctrl->walseg == 0 || oldctrl->walseg != newctrl->walseg)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata WAL segment sizes are invalid or do not match\n");
if (oldctrl->ident == 0 || oldctrl->ident != newctrl->ident)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata maximum identifier lengths are invalid or do not match\n");
if (oldctrl->index == 0 || oldctrl->index != newctrl->index)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata maximum indexed columns are invalid or do not match\n");
if (oldctrl->toast == 0 || oldctrl->toast != newctrl->toast)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n");
if (oldctrl->date_is_int != newctrl->date_is_int)
{
pg_log(ctx, PG_WARNING,
pg_log(PG_WARNING,
"\nOld and new pg_controldata date/time storage types do not match.\n");
/*
* This is a common 8.3 -> 8.4 migration problem, so we are more
* verboase
*/
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"You will need to rebuild the new server with configure\n"
"--disable-integer-datetimes or get server binaries built\n"
"with those options.\n");
@ -517,18 +517,18 @@ check_control_data(migratorContext *ctx, ControlData *oldctrl,
void
rename_old_pg_control(migratorContext *ctx)
rename_old_pg_control(void)
{
char old_path[MAXPGPATH],
new_path[MAXPGPATH];
prep_status(ctx, "Adding \".old\" suffix to old global/pg_control");
prep_status("Adding \".old\" suffix to old global/pg_control");
snprintf(old_path, sizeof(old_path), "%s/global/pg_control", ctx->old.pgdata);
snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", ctx->old.pgdata);
snprintf(old_path, sizeof(old_path), "%s/global/pg_control", old_cluster.pgdata);
snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", old_cluster.pgdata);
if (pg_mv_file(old_path, new_path) != 0)
pg_log(ctx, PG_FATAL, "Unable to rename %s to %s.\n", old_path, new_path);
check_ok(ctx);
pg_log(PG_FATAL, "Unable to rename %s to %s.\n", old_path, new_path);
check_ok();
}
@ -539,12 +539,12 @@ rename_old_pg_control(migratorContext *ctx)
* It also does unsetenv() if val is NULL.
*/
static void
putenv2(migratorContext *ctx, const char *var, const char *val)
putenv2(const char *var, const char *val)
{
if (val)
{
#ifndef WIN32
char *envstr = (char *) pg_malloc(ctx, strlen(var) +
char *envstr = (char *) pg_malloc(strlen(var) +
strlen(val) + 1);
sprintf(envstr, "%s=%s", var, val);


@ -12,20 +12,20 @@
void
generate_old_dump(migratorContext *ctx)
generate_old_dump(void)
{
/* run new pg_dumpall binary */
prep_status(ctx, "Creating catalog dump");
prep_status("Creating catalog dump");
/*
* --binary-upgrade records the width of dropped columns in pg_class, and
* restores the frozenid's for databases and relations.
*/
exec_prog(ctx, true,
exec_prog(true,
SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --username \"%s\" "
"--schema-only --binary-upgrade > \"%s/" ALL_DUMP_FILE "\""
SYSTEMQUOTE, ctx->new.bindir, ctx->old.port, ctx->user, ctx->cwd);
check_ok(ctx);
SYSTEMQUOTE, new_cluster.bindir, old_cluster.port, os_info.user, os_info.cwd);
check_ok();
}
@ -42,7 +42,7 @@ generate_old_dump(migratorContext *ctx)
* an error during restore
*/
void
split_old_dump(migratorContext *ctx)
split_old_dump(void)
{
FILE *all_dump,
*globals_dump,
@ -55,22 +55,22 @@ split_old_dump(migratorContext *ctx)
char filename[MAXPGPATH];
bool suppressed_username = false;
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, ALL_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, ALL_DUMP_FILE);
if ((all_dump = fopen(filename, "r")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot open dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBALS_DUMP_FILE);
pg_log(PG_FATAL, "Cannot open dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, GLOBALS_DUMP_FILE);
if ((globals_dump = fopen(filename, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, DB_DUMP_FILE);
pg_log(PG_FATAL, "Cannot write to dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, DB_DUMP_FILE);
if ((db_dump = fopen(filename, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
pg_log(PG_FATAL, "Cannot write to dump file %s\n", filename);
current_output = globals_dump;
/* patterns used to prevent our own username from being recreated */
snprintf(create_role_str, sizeof(create_role_str),
"CREATE ROLE %s;", ctx->user);
"CREATE ROLE %s;", os_info.user);
snprintf(create_role_str_quote, sizeof(create_role_str_quote),
"CREATE ROLE %s;", quote_identifier(ctx, ctx->user));
"CREATE ROLE %s;", quote_identifier(os_info.user));
while (fgets(line, sizeof(line), all_dump) != NULL)
{


@ -13,9 +13,9 @@
#include <grp.h>
static void check_data_dir(migratorContext *ctx, const char *pg_data);
static void check_bin_dir(migratorContext *ctx, ClusterInfo *cluster);
static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName);
static void check_data_dir(const char *pg_data);
static void check_bin_dir(ClusterInfo *cluster);
static int check_exec(const char *dir, const char *cmdName);
static const char *validate_exec(const char *path);
@ -30,7 +30,7 @@ static const char *validate_exec(const char *path);
* instead of returning should an error occur.
*/
int
exec_prog(migratorContext *ctx, bool throw_error, const char *fmt,...)
exec_prog(bool throw_error, const char *fmt,...)
{
va_list args;
int result;
@ -40,13 +40,13 @@ exec_prog(migratorContext *ctx, bool throw_error, const char *fmt,...)
vsnprintf(cmd, MAXPGPATH, fmt, args);
va_end(args);
pg_log(ctx, PG_INFO, "%s\n", cmd);
pg_log(PG_INFO, "%s\n", cmd);
result = system(cmd);
if (result != 0)
{
pg_log(ctx, throw_error ? PG_FATAL : PG_INFO,
pg_log(throw_error ? PG_FATAL : PG_INFO,
"\nThere were problems executing %s\n", cmd);
return 1;
}
@ -62,7 +62,7 @@ exec_prog(migratorContext *ctx, bool throw_error, const char *fmt,...)
* The check is performed by looking for the existence of postmaster.pid file.
*/
bool
is_server_running(migratorContext *ctx, const char *datadir)
is_server_running(const char *datadir)
{
char path[MAXPGPATH];
int fd;
@ -72,7 +72,7 @@ is_server_running(migratorContext *ctx, const char *datadir)
if ((fd = open(path, O_RDONLY, 0)) < 0)
{
if (errno != ENOENT)
pg_log(ctx, PG_FATAL, "\ncould not open file \"%s\" for reading\n",
pg_log(PG_FATAL, "\ncould not open file \"%s\" for reading\n",
path);
return false;
@ -92,23 +92,23 @@ is_server_running(migratorContext *ctx, const char *datadir)
* NOTE: May update the values of all parameters
*/
void
verify_directories(migratorContext *ctx)
verify_directories(void)
{
prep_status(ctx, "Checking old data directory (%s)", ctx->old.pgdata);
check_data_dir(ctx, ctx->old.pgdata);
check_ok(ctx);
prep_status("Checking old data directory (%s)", old_cluster.pgdata);
check_data_dir(old_cluster.pgdata);
check_ok();
prep_status(ctx, "Checking old bin directory (%s)", ctx->old.bindir);
check_bin_dir(ctx, &ctx->old);
check_ok(ctx);
prep_status("Checking old bin directory (%s)", old_cluster.bindir);
check_bin_dir(&old_cluster);
check_ok();
prep_status(ctx, "Checking new data directory (%s)", ctx->new.pgdata);
check_data_dir(ctx, ctx->new.pgdata);
check_ok(ctx);
prep_status("Checking new data directory (%s)", new_cluster.pgdata);
check_data_dir(new_cluster.pgdata);
check_ok();
prep_status(ctx, "Checking new bin directory (%s)", ctx->new.bindir);
check_bin_dir(ctx, &ctx->new);
check_ok(ctx);
prep_status("Checking new bin directory (%s)", new_cluster.bindir);
check_bin_dir(&new_cluster);
check_ok();
}
@ -122,7 +122,7 @@ verify_directories(migratorContext *ctx)
*
*/
static void
check_data_dir(migratorContext *ctx, const char *pg_data)
check_data_dir(const char *pg_data)
{
char subDirName[MAXPGPATH];
int subdirnum;
@ -140,10 +140,10 @@ check_data_dir(migratorContext *ctx, const char *pg_data)
requiredSubdirs[subdirnum]);
if (stat(subDirName, &statBuf) != 0)
report_status(ctx, PG_FATAL, "check for %s failed: %s",
report_status(PG_FATAL, "check for %s failed: %s",
requiredSubdirs[subdirnum], getErrorText(errno));
else if (!S_ISDIR(statBuf.st_mode))
report_status(ctx, PG_FATAL, "%s is not a directory",
report_status(PG_FATAL, "%s is not a directory",
requiredSubdirs[subdirnum]);
}
}
@ -158,12 +158,12 @@ check_data_dir(migratorContext *ctx, const char *pg_data)
* exit().
*/
static void
check_bin_dir(migratorContext *ctx, ClusterInfo *cluster)
check_bin_dir(ClusterInfo *cluster)
{
check_exec(ctx, cluster->bindir, "postgres");
check_exec(ctx, cluster->bindir, "psql");
check_exec(ctx, cluster->bindir, "pg_ctl");
check_exec(ctx, cluster->bindir, "pg_dumpall");
check_exec(cluster->bindir, "postgres");
check_exec(cluster->bindir, "psql");
check_exec(cluster->bindir, "pg_ctl");
check_exec(cluster->bindir, "pg_dumpall");
}
@ -177,7 +177,7 @@ check_bin_dir(migratorContext *ctx, ClusterInfo *cluster)
* a valid executable, this function returns 0 to indicated failure.
*/
static int
check_exec(migratorContext *ctx, const char *dir, const char *cmdName)
check_exec(const char *dir, const char *cmdName)
{
char path[MAXPGPATH];
const char *errMsg;
@ -187,7 +187,7 @@ check_exec(migratorContext *ctx, const char *dir, const char *cmdName)
if ((errMsg = validate_exec(path)) == NULL)
return 1; /* 1 -> first alternative OK */
else
pg_log(ctx, PG_FATAL, "check for %s failed - %s\n", cmdName, errMsg);
pg_log(PG_FATAL, "check for %s failed - %s\n", cmdName, errMsg);
return 0; /* 0 -> neither alternative is acceptable */
}


@ -22,7 +22,7 @@ static int copy_dir(const char *from, const char *to, bool force);
#endif
#ifndef HAVE_SCANDIR
static int pg_scandir_internal(migratorContext *ctx, const char *dirname,
static int pg_scandir_internal(const char *dirname,
struct dirent *** namelist,
int (*selector) (const struct dirent *));
#endif
@ -35,7 +35,7 @@ static int pg_scandir_internal(migratorContext *ctx, const char *dirname,
* uses that pageConverter to do a page-by-page conversion.
*/
const char *
copyAndUpdateFile(migratorContext *ctx, pageCnvCtx *pageConverter,
copyAndUpdateFile(pageCnvCtx *pageConverter,
const char *src, const char *dst, bool force)
{
if (pageConverter == NULL)
@ -116,7 +116,7 @@ copyAndUpdateFile(migratorContext *ctx, pageCnvCtx *pageConverter,
* instead of copying the data from the old cluster to the new cluster.
*/
const char *
linkAndUpdateFile(migratorContext *ctx, pageCnvCtx *pageConverter,
linkAndUpdateFile(pageCnvCtx *pageConverter,
const char *src, const char *dst)
{
if (pageConverter != NULL)
@ -231,12 +231,12 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
* Wrapper for portable scandir functionality
*/
int
pg_scandir(migratorContext *ctx, const char *dirname,
pg_scandir(const char *dirname,
struct dirent *** namelist,
int (*selector) (const struct dirent *))
{
#ifndef HAVE_SCANDIR
return pg_scandir_internal(ctx, dirname, namelist, selector);
return pg_scandir_internal(dirname, namelist, selector);
/*
* scandir() is originally from BSD 4.3, which had the third argument as
@ -277,7 +277,7 @@ pg_scandir(migratorContext *ctx, const char *dirname,
* .2, etc.) and should therefore be invoked a small number of times.
*/
static int
pg_scandir_internal(migratorContext *ctx, const char *dirname,
pg_scandir_internal(const char *dirname,
struct dirent *** namelist, int (*selector) (const struct dirent *))
{
DIR *dirdesc;
@ -287,7 +287,7 @@ pg_scandir_internal(migratorContext *ctx, const char *dirname,
size_t entrysize;
if ((dirdesc = opendir(dirname)) == NULL)
pg_log(ctx, PG_FATAL, "Could not open directory \"%s\": %m\n", dirname);
pg_log(PG_FATAL, "Could not open directory \"%s\": %m\n", dirname);
*namelist = NULL;
@ -342,18 +342,18 @@ dir_matching_filenames(const struct dirent * scan_ent)
void
check_hard_link(migratorContext *ctx)
check_hard_link(void)
{
char existing_file[MAXPGPATH];
char new_link_file[MAXPGPATH];
snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION", ctx->old.pgdata);
snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest", ctx->new.pgdata);
snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION", old_cluster.pgdata);
snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest", new_cluster.pgdata);
unlink(new_link_file); /* might fail */
if (pg_link_file(existing_file, new_link_file) == -1)
{
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"Could not create hard link between old and new data directories: %s\n"
"In link mode the old and new data directories must be on the same file system volume.\n",
getErrorText(errno));


@ -19,65 +19,65 @@
* backend behavior.
*/
void
install_support_functions(migratorContext *ctx)
install_support_functions(void)
{
int dbnum;
prep_status(ctx, "Adding support functions to new cluster");
prep_status("Adding support functions to new cluster");
for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
DbInfo *newdb = &ctx->new.dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, newdb->db_name, CLUSTER_NEW);
DbInfo *newdb = &new_cluster.dbarr.dbs[dbnum];
PGconn *conn = connectToServer(newdb->db_name, CLUSTER_NEW);
/* suppress NOTICE of dropped objects */
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"SET client_min_messages = warning;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"DROP SCHEMA IF EXISTS binary_upgrade CASCADE;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"RESET client_min_messages;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE SCHEMA binary_upgrade;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.set_next_pg_type_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.set_next_pg_type_array_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.set_next_pg_type_toast_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.set_next_heap_relfilenode(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.set_next_toast_relfilenode(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.set_next_index_relfilenode(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
" binary_upgrade.add_pg_enum_label(OID, OID, NAME) "
"RETURNS VOID "
@ -85,32 +85,32 @@ install_support_functions(migratorContext *ctx)
"LANGUAGE C STRICT;"));
PQfinish(conn);
}
check_ok(ctx);
check_ok();
}
void
uninstall_support_functions(migratorContext *ctx)
uninstall_support_functions(void)
{
int dbnum;
prep_status(ctx, "Removing support functions from new cluster");
prep_status("Removing support functions from new cluster");
for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
DbInfo *newdb = &ctx->new.dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, newdb->db_name, CLUSTER_NEW);
DbInfo *newdb = &new_cluster.dbarr.dbs[dbnum];
PGconn *conn = connectToServer(newdb->db_name, CLUSTER_NEW);
/* suppress NOTICE of dropped objects */
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"SET client_min_messages = warning;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"DROP SCHEMA binary_upgrade CASCADE;"));
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"RESET client_min_messages;"));
PQfinish(conn);
}
check_ok(ctx);
check_ok();
}
@ -121,25 +121,25 @@ uninstall_support_functions(migratorContext *ctx)
* We will later check that they all exist in the new installation.
*/
void
get_loadable_libraries(migratorContext *ctx)
get_loadable_libraries(void)
{
ClusterInfo *active_cluster = &ctx->old;
ClusterInfo *active_cluster = &old_cluster;
PGresult **ress;
int totaltups;
int dbnum;
ress = (PGresult **)
pg_malloc(ctx, active_cluster->dbarr.ndbs * sizeof(PGresult *));
pg_malloc(active_cluster->dbarr.ndbs * sizeof(PGresult *));
totaltups = 0;
/* Fetch all library names, removing duplicates within each DB */
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, CLUSTER_OLD);
PGconn *conn = connectToServer(active_db->db_name, CLUSTER_OLD);
/* Fetch all libraries referenced in this DB */
ress[dbnum] = executeQueryOrDie(ctx, conn,
ress[dbnum] = executeQueryOrDie(conn,
"SELECT DISTINCT probin "
"FROM pg_catalog.pg_proc "
"WHERE prolang = 13 /* C */ AND "
@ -153,9 +153,9 @@ get_loadable_libraries(migratorContext *ctx)
/* Allocate what's certainly enough space */
if (totaltups > 0)
ctx->libraries = (char **) pg_malloc(ctx, totaltups * sizeof(char *));
os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));
else
ctx->libraries = NULL;
os_info.libraries = NULL;
/*
* Now remove duplicates across DBs. This is pretty inefficient code, but
@ -178,20 +178,20 @@ get_loadable_libraries(migratorContext *ctx)
for (n = 0; n < totaltups; n++)
{
if (strcmp(lib, ctx->libraries[n]) == 0)
if (strcmp(lib, os_info.libraries[n]) == 0)
{
dup = true;
break;
}
}
if (!dup)
ctx->libraries[totaltups++] = pg_strdup(ctx, lib);
os_info.libraries[totaltups++] = pg_strdup(lib);
}
PQclear(res);
}
ctx->num_libraries = totaltups;
os_info.num_libraries = totaltups;
pg_free(ress);
}
@ -205,24 +205,24 @@ get_loadable_libraries(migratorContext *ctx)
* compatibility as well as presence.
*/
void
check_loadable_libraries(migratorContext *ctx)
check_loadable_libraries(void)
{
PGconn *conn = connectToServer(ctx, "template1", CLUSTER_NEW);
PGconn *conn = connectToServer("template1", CLUSTER_NEW);
int libnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for presence of required libraries");
prep_status("Checking for presence of required libraries");
snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
ctx->cwd);
os_info.cwd);
for (libnum = 0; libnum < ctx->num_libraries; libnum++)
for (libnum = 0; libnum < os_info.num_libraries; libnum++)
{
char *lib = ctx->libraries[libnum];
char *lib = os_info.libraries[libnum];
int llen = strlen(lib);
char *cmd = (char *) pg_malloc(ctx, 8 + 2 * llen + 1);
char *cmd = (char *) pg_malloc(8 + 2 * llen + 1);
PGresult *res;
strcpy(cmd, "LOAD '");
@ -235,7 +235,7 @@ check_loadable_libraries(migratorContext *ctx)
{
found = true;
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
pg_log(PG_FATAL, "Could not create necessary file: %s\n",
output_path);
fprintf(script, "Failed to load library: %s\n%s\n",
lib,
@ -251,8 +251,8 @@ check_loadable_libraries(migratorContext *ctx)
if (found)
{
fclose(script);
pg_log(ctx, PG_REPORT, "fatal\n");
pg_log(ctx, PG_FATAL,
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"| Your installation references loadable libraries that are missing\n"
"| from the new installation. You can add these libraries to\n"
"| the new installation, or remove the functions using them\n"
@ -261,5 +261,5 @@ check_loadable_libraries(migratorContext *ctx)
"| \"%s\".\n\n", output_path);
}
else
check_ok(ctx);
check_ok();
}
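
check_loadable_libraries depends on the library list being duplicate-free across databases; the diff above keeps the simple quadratic dedup in get_loadable_libraries, now writing into os_info.libraries instead of ctx->libraries. A self-contained sketch of that dedup step with stand-in data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the duplicate removal in get_loadable_libraries: each fetched
 * library name is compared against the ones already kept, and only new
 * names are appended.  Quadratic, as the comment in the diff admits, but
 * the list of C-language libraries is tiny in practice. */
int
main(void)
{
    const char *fetched[] = {        /* probin values gathered from all DBs */
        "$libdir/plpgsql", "$libdir/isn", "$libdir/plpgsql"
    };
    int         nfetched = 3;
    char      **kept = malloc(nfetched * sizeof(char *));
    int         nkept = 0;

    for (int i = 0; i < nfetched; i++)
    {
        int         dup = 0;

        for (int n = 0; n < nkept; n++)
        {
            if (strcmp(fetched[i], kept[n]) == 0)
            {
                dup = 1;
                break;
            }
        }
        if (!dup)
            kept[nkept++] = strdup(fetched[i]);
    }

    for (int n = 0; n < nkept; n++)
    {
        printf("unique library: %s\n", kept[n]);
        free(kept[n]);
    }
    free(kept);
    return 0;
}
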

View File

@ -12,27 +12,27 @@
#include "access/transam.h"
static void get_db_infos(migratorContext *ctx, DbInfoArr *dbinfos,
static void get_db_infos(DbInfoArr *dbinfos,
Cluster whichCluster);
static void dbarr_print(migratorContext *ctx, DbInfoArr *arr,
static void dbarr_print(DbInfoArr *arr,
Cluster whichCluster);
static void relarr_print(migratorContext *ctx, RelInfoArr *arr);
static void get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
static void relarr_print(RelInfoArr *arr);
static void get_rel_infos(const DbInfo *dbinfo,
RelInfoArr *relarr, Cluster whichCluster);
static void relarr_free(RelInfoArr *rel_arr);
static void map_rel(migratorContext *ctx, const RelInfo *oldrel,
static void map_rel(const RelInfo *oldrel,
const RelInfo *newrel, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static void map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
static void map_rel_by_id(Oid oldid, Oid newid,
const char *old_nspname, const char *old_relname,
const char *new_nspname, const char *new_relname,
const char *old_tablespace, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static RelInfo *relarr_lookup_reloid(migratorContext *ctx,
static RelInfo *relarr_lookup_reloid(
RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
static RelInfo *relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
static RelInfo *relarr_lookup_rel(RelInfoArr *rel_arr,
const char *nspname, const char *relname,
Cluster whichCluster);
@ -47,14 +47,14 @@ static RelInfo *relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
* NOTE: It's the caller's responsibility to free the returned array.
*/
FileNameMap *
gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
int *nmaps, const char *old_pgdata, const char *new_pgdata)
{
FileNameMap *maps;
int relnum;
int num_maps = 0;
maps = (FileNameMap *) pg_malloc(ctx, sizeof(FileNameMap) *
maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
new_db->rel_arr.nrels);
for (relnum = 0; relnum < new_db->rel_arr.nrels; relnum++)
@ -66,10 +66,10 @@ gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
if (strcmp(newrel->nspname, "pg_toast") == 0)
continue;
oldrel = relarr_lookup_rel(ctx, &(old_db->rel_arr), newrel->nspname,
oldrel = relarr_lookup_rel(&(old_db->rel_arr), newrel->nspname,
newrel->relname, CLUSTER_OLD);
map_rel(ctx, oldrel, newrel, old_db, new_db, old_pgdata, new_pgdata,
map_rel(oldrel, newrel, old_db, new_db, old_pgdata, new_pgdata,
maps + num_maps);
num_maps++;
@ -91,13 +91,13 @@ gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
newrel->reloid);
/* look them up in their respective arrays */
old_toast = relarr_lookup_reloid(ctx, &old_db->rel_arr,
old_toast = relarr_lookup_reloid(&old_db->rel_arr,
oldrel->toastrelid, CLUSTER_OLD);
new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
new_toast = relarr_lookup_rel(&new_db->rel_arr,
"pg_toast", new_name, CLUSTER_NEW);
/* finally create a mapping for them */
map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata, new_pgdata,
map_rel(old_toast, new_toast, old_db, new_db, old_pgdata, new_pgdata,
maps + num_maps);
num_maps++;
@ -117,13 +117,13 @@ gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
newrel->reloid);
/* look them up in their respective arrays */
old_toast = relarr_lookup_rel(ctx, &old_db->rel_arr,
old_toast = relarr_lookup_rel(&old_db->rel_arr,
"pg_toast", old_name, CLUSTER_OLD);
new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
new_toast = relarr_lookup_rel(&new_db->rel_arr,
"pg_toast", new_name, CLUSTER_NEW);
/* finally create a mapping for them */
map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata,
map_rel(old_toast, new_toast, old_db, new_db, old_pgdata,
new_pgdata, maps + num_maps);
num_maps++;
}
@ -135,11 +135,11 @@ gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
static void
map_rel(migratorContext *ctx, const RelInfo *oldrel, const RelInfo *newrel,
map_rel(const RelInfo *oldrel, const RelInfo *newrel,
const DbInfo *old_db, const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map)
{
map_rel_by_id(ctx, oldrel->relfilenode, newrel->relfilenode, oldrel->nspname,
map_rel_by_id(oldrel->relfilenode, newrel->relfilenode, oldrel->nspname,
oldrel->relname, newrel->nspname, newrel->relname, oldrel->tablespace, old_db,
new_db, olddata, newdata, map);
}
@ -151,15 +151,15 @@ map_rel(migratorContext *ctx, const RelInfo *oldrel, const RelInfo *newrel,
* fills a file node map structure and returns it in "map".
*/
static void
map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
map_rel_by_id(Oid oldid, Oid newid,
const char *old_nspname, const char *old_relname,
const char *new_nspname, const char *new_relname,
const char *old_tablespace, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map)
{
map->new = newid;
map->old = oldid;
map->old_relfilenode = oldid;
map->new_relfilenode = newid;
snprintf(map->old_nspname, sizeof(map->old_nspname), "%s", old_nspname);
snprintf(map->old_relname, sizeof(map->old_relname), "%s", old_relname);
@ -182,28 +182,30 @@ map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
* location
*/
snprintf(map->old_file, sizeof(map->old_file), "%s%s/%u", old_tablespace,
ctx->old.tablespace_suffix, old_db->db_oid);
old_cluster.tablespace_suffix, old_db->db_oid);
snprintf(map->new_file, sizeof(map->new_file), "%s%s/%u", old_tablespace,
ctx->new.tablespace_suffix, new_db->db_oid);
new_cluster.tablespace_suffix, new_db->db_oid);
}
}
void
print_maps(migratorContext *ctx, FileNameMap *maps, int n, const char *dbName)
print_maps(FileNameMap *maps, int n, const char *dbName)
{
if (ctx->debug)
if (log.debug)
{
int mapnum;
pg_log(ctx, PG_DEBUG, "mappings for db %s:\n", dbName);
pg_log(PG_DEBUG, "mappings for db %s:\n", dbName);
for (mapnum = 0; mapnum < n; mapnum++)
pg_log(ctx, PG_DEBUG, "%s.%s:%u ==> %s.%s:%u\n",
maps[mapnum].old_nspname, maps[mapnum].old_relname, maps[mapnum].old,
maps[mapnum].new_nspname, maps[mapnum].new_relname, maps[mapnum].new);
pg_log(PG_DEBUG, "%s.%s:%u ==> %s.%s:%u\n",
maps[mapnum].old_nspname, maps[mapnum].old_relname,
maps[mapnum].old_relfilenode,
maps[mapnum].new_nspname, maps[mapnum].new_relname,
maps[mapnum].new_relfilenode);
pg_log(ctx, PG_DEBUG, "\n\n");
pg_log(PG_DEBUG, "\n\n");
}
}
@ -215,9 +217,9 @@ print_maps(migratorContext *ctx, FileNameMap *maps, int n, const char *dbName)
* databases.
*/
static void
get_db_infos(migratorContext *ctx, DbInfoArr *dbinfs_arr, Cluster whichCluster)
get_db_infos(DbInfoArr *dbinfs_arr, Cluster whichCluster)
{
PGconn *conn = connectToServer(ctx, "template1", whichCluster);
PGconn *conn = connectToServer("template1", whichCluster);
PGresult *res;
int ntups;
int tupnum;
@ -226,7 +228,7 @@ get_db_infos(migratorContext *ctx, DbInfoArr *dbinfs_arr, Cluster whichCluster)
int i_oid;
int i_spclocation;
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT d.oid, d.datname, t.spclocation "
"FROM pg_catalog.pg_database d "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
@ -238,7 +240,7 @@ get_db_infos(migratorContext *ctx, DbInfoArr *dbinfs_arr, Cluster whichCluster)
i_spclocation = PQfnumber(res, "spclocation");
ntups = PQntuples(res);
dbinfos = (DbInfo *) pg_malloc(ctx, sizeof(DbInfo) * ntups);
dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups);
for (tupnum = 0; tupnum < ntups; tupnum++)
{
@ -265,18 +267,18 @@ get_db_infos(migratorContext *ctx, DbInfoArr *dbinfs_arr, Cluster whichCluster)
* on the given "port". Assumes that server is already running.
*/
void
get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr, Cluster whichCluster)
get_db_and_rel_infos(DbInfoArr *db_arr, Cluster whichCluster)
{
int dbnum;
get_db_infos(ctx, db_arr, whichCluster);
get_db_infos(db_arr, whichCluster);
for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++)
get_rel_infos(ctx, &db_arr->dbs[dbnum],
get_rel_infos(&db_arr->dbs[dbnum],
&(db_arr->dbs[dbnum].rel_arr), whichCluster);
if (ctx->debug)
dbarr_print(ctx, db_arr, whichCluster);
if (log.debug)
dbarr_print(db_arr, whichCluster);
}
@ -290,10 +292,10 @@ get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr, Cluster whichClust
* FirstNormalObjectId belongs to the user
*/
static void
get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
get_rel_infos(const DbInfo *dbinfo,
RelInfoArr *relarr, Cluster whichCluster)
{
PGconn *conn = connectToServer(ctx, dbinfo->db_name, whichCluster);
PGconn *conn = connectToServer(dbinfo->db_name, whichCluster);
PGresult *res;
RelInfo *relinfos;
int ntups;
@ -339,14 +341,14 @@ get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
"ORDER BY n.nspname, c.relname;",
FirstNormalObjectId,
/* see the comment at the top of old_8_3_create_sequence_script() */
(GET_MAJOR_VERSION(ctx->old.major_version) <= 803) ?
(GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ?
"" : ", 'S'");
res = executeQueryOrDie(ctx, conn, query);
res = executeQueryOrDie(conn, query);
ntups = PQntuples(res);
relinfos = (RelInfo *) pg_malloc(ctx, sizeof(RelInfo) * ntups);
relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);
i_oid = PQfnumber(res, "oid");
i_nspname = PQfnumber(res, "nspname");
@ -416,7 +418,7 @@ dbarr_lookup_db(DbInfoArr *db_arr, const char *db_name)
* RelInfo structure.
*/
static RelInfo *
relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
relarr_lookup_rel(RelInfoArr *rel_arr,
const char *nspname, const char *relname,
Cluster whichCluster)
{
@ -431,7 +433,7 @@ relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
strcmp(rel_arr->rels[relnum].relname, relname) == 0)
return &rel_arr->rels[relnum];
}
pg_log(ctx, PG_FATAL, "Could not find %s.%s in %s cluster\n",
pg_log(PG_FATAL, "Could not find %s.%s in %s cluster\n",
nspname, relname, CLUSTERNAME(whichCluster));
return NULL;
}
@ -445,7 +447,7 @@ relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
* found.
*/
static RelInfo *
relarr_lookup_reloid(migratorContext *ctx, RelInfoArr *rel_arr, Oid oid,
relarr_lookup_reloid(RelInfoArr *rel_arr, Oid oid,
Cluster whichCluster)
{
int relnum;
@ -458,7 +460,7 @@ relarr_lookup_reloid(migratorContext *ctx, RelInfoArr *rel_arr, Oid oid,
if (rel_arr->rels[relnum].reloid == oid)
return &rel_arr->rels[relnum];
}
pg_log(ctx, PG_FATAL, "Could not find %d in %s cluster\n",
pg_log(PG_FATAL, "Could not find %d in %s cluster\n",
oid, CLUSTERNAME(whichCluster));
return NULL;
}
@ -484,28 +486,28 @@ dbarr_free(DbInfoArr *db_arr)
static void
dbarr_print(migratorContext *ctx, DbInfoArr *arr, Cluster whichCluster)
dbarr_print(DbInfoArr *arr, Cluster whichCluster)
{
int dbnum;
pg_log(ctx, PG_DEBUG, "%s databases\n", CLUSTERNAME(whichCluster));
pg_log(PG_DEBUG, "%s databases\n", CLUSTERNAME(whichCluster));
for (dbnum = 0; dbnum < arr->ndbs; dbnum++)
{
pg_log(ctx, PG_DEBUG, "Database: %s\n", arr->dbs[dbnum].db_name);
relarr_print(ctx, &arr->dbs[dbnum].rel_arr);
pg_log(ctx, PG_DEBUG, "\n\n");
pg_log(PG_DEBUG, "Database: %s\n", arr->dbs[dbnum].db_name);
relarr_print(&arr->dbs[dbnum].rel_arr);
pg_log(PG_DEBUG, "\n\n");
}
}
static void
relarr_print(migratorContext *ctx, RelInfoArr *arr)
relarr_print(RelInfoArr *arr)
{
int relnum;
for (relnum = 0; relnum < arr->nrels; relnum++)
pg_log(ctx, PG_DEBUG, "relname: %s.%s: reloid: %u reltblspace: %s\n",
pg_log(PG_DEBUG, "relname: %s.%s: reloid: %u reltblspace: %s\n",
arr->rels[relnum].nspname, arr->rels[relnum].relname,
arr->rels[relnum].reloid, arr->rels[relnum].tablespace);
}
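
For rows that live in a user tablespace, map_rel_by_id composes each side's directory as tablespace path + per-cluster suffix + database OID. A self-contained sketch of that composition; the suffix strings and OIDs below are placeholders, not values from the commit:

#include <stdio.h>

#define MAXPGPATH 1024

/* Sketch: assemble a FileNameMap's per-database directories the way
 * map_rel_by_id does for user tablespaces. */
int
main(void)
{
    char        old_file[MAXPGPATH];
    char        new_file[MAXPGPATH];
    const char *tablespace = "/srv/pg_tblspc/ts1";   /* pg_tablespace.spclocation */
    const char *old_suffix = "";                     /* placeholder: no per-version dir */
    const char *new_suffix = "/PG_9.1_201010181";    /* placeholder version-specific dir */
    unsigned int old_db_oid = 16384;
    unsigned int new_db_oid = 16390;

    snprintf(old_file, sizeof(old_file), "%s%s/%u",
             tablespace, old_suffix, old_db_oid);
    snprintf(new_file, sizeof(new_file), "%s%s/%u",
             tablespace, new_suffix, new_db_oid);

    printf("old dir: %s\nnew dir: %s\n", old_file, new_file);
    return 0;
}
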

View File

@ -16,11 +16,14 @@
#endif
static void usage(migratorContext *ctx);
static void validateDirectoryOption(migratorContext *ctx, char **dirpath,
static void usage(void);
static void validateDirectoryOption(char **dirpath,
char *envVarName, char *cmdLineOption, char *description);
static void get_pkglibdirs(migratorContext *ctx);
static char *get_pkglibdir(migratorContext *ctx, const char *bindir);
static void get_pkglibdirs(void);
static char *get_pkglibdir(const char *bindir);
UserOpts user_opts;
/*
@ -30,7 +33,7 @@ static char *get_pkglibdir(migratorContext *ctx, const char *bindir);
* and initializes the rest of the object.
*/
void
parseCommandLine(migratorContext *ctx, int argc, char *argv[])
parseCommandLine(int argc, char *argv[])
{
static struct option long_options[] = {
{"old-datadir", required_argument, NULL, 'd'},
@ -55,39 +58,39 @@ parseCommandLine(migratorContext *ctx, int argc, char *argv[])
if (getenv("PGUSER"))
{
pg_free(ctx->user);
ctx->user = pg_strdup(ctx, getenv("PGUSER"));
pg_free(os_info.user);
os_info.user = pg_strdup(getenv("PGUSER"));
}
ctx->progname = get_progname(argv[0]);
ctx->old.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
ctx->new.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
os_info.progname = get_progname(argv[0]);
old_cluster.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
new_cluster.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
/* must save value, getenv()'s pointer is not stable */
ctx->transfer_mode = TRANSFER_MODE_COPY;
user_opts.transfer_mode = TRANSFER_MODE_COPY;
/* user lookup and 'root' test must be split because of usage() */
user_id = get_user_info(ctx, &ctx->user);
user_id = get_user_info(&os_info.user);
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
strcmp(argv[1], "-?") == 0)
{
usage(ctx);
exit_nicely(ctx, false);
usage();
exit_nicely(false);
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
pg_log(ctx, PG_REPORT, "pg_upgrade " PG_VERSION "\n");
exit_nicely(ctx, false);
pg_log(PG_REPORT, "pg_upgrade " PG_VERSION "\n");
exit_nicely(false);
}
}
if (user_id == 0)
pg_log(ctx, PG_FATAL, "%s: cannot be run as root\n", ctx->progname);
pg_log(PG_FATAL, "%s: cannot be run as root\n", os_info.progname);
getcwd(ctx->cwd, MAXPGPATH);
getcwd(os_info.cwd, MAXPGPATH);
while ((option = getopt_long(argc, argv, "d:D:b:B:cgG:kl:p:P:u:v",
long_options, &optindex)) != -1)
@ -95,81 +98,81 @@ parseCommandLine(migratorContext *ctx, int argc, char *argv[])
switch (option)
{
case 'd':
ctx->old.pgdata = pg_strdup(ctx, optarg);
old_cluster.pgdata = pg_strdup(optarg);
break;
case 'D':
ctx->new.pgdata = pg_strdup(ctx, optarg);
new_cluster.pgdata = pg_strdup(optarg);
break;
case 'b':
ctx->old.bindir = pg_strdup(ctx, optarg);
old_cluster.bindir = pg_strdup(optarg);
break;
case 'B':
ctx->new.bindir = pg_strdup(ctx, optarg);
new_cluster.bindir = pg_strdup(optarg);
break;
case 'c':
ctx->check = true;
user_opts.check = true;
break;
case 'g':
pg_log(ctx, PG_REPORT, "Running in debug mode\n");
ctx->debug = true;
pg_log(PG_REPORT, "Running in debug mode\n");
log.debug = true;
break;
case 'G':
if ((ctx->debug_fd = fopen(optarg, "w")) == NULL)
if ((log.debug_fd = fopen(optarg, "w")) == NULL)
{
pg_log(ctx, PG_FATAL, "cannot open debug file\n");
exit_nicely(ctx, false);
pg_log(PG_FATAL, "cannot open debug file\n");
exit_nicely(false);
}
break;
case 'k':
ctx->transfer_mode = TRANSFER_MODE_LINK;
user_opts.transfer_mode = TRANSFER_MODE_LINK;
break;
case 'l':
ctx->logfile = pg_strdup(ctx, optarg);
log.filename = pg_strdup(optarg);
break;
case 'p':
if ((ctx->old.port = atoi(optarg)) <= 0)
if ((old_cluster.port = atoi(optarg)) <= 0)
{
pg_log(ctx, PG_FATAL, "invalid old port number\n");
exit_nicely(ctx, false);
pg_log(PG_FATAL, "invalid old port number\n");
exit_nicely(false);
}
break;
case 'P':
if ((ctx->new.port = atoi(optarg)) <= 0)
if ((new_cluster.port = atoi(optarg)) <= 0)
{
pg_log(ctx, PG_FATAL, "invalid new port number\n");
exit_nicely(ctx, false);
pg_log(PG_FATAL, "invalid new port number\n");
exit_nicely(false);
}
break;
case 'u':
pg_free(ctx->user);
ctx->user = pg_strdup(ctx, optarg);
pg_free(os_info.user);
os_info.user = pg_strdup(optarg);
break;
case 'v':
pg_log(ctx, PG_REPORT, "Running in verbose mode\n");
ctx->verbose = true;
pg_log(PG_REPORT, "Running in verbose mode\n");
log.verbose = true;
break;
default:
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"Try \"%s --help\" for more information.\n",
ctx->progname);
os_info.progname);
break;
}
}
if (ctx->logfile != NULL)
if (log.filename != NULL)
{
/*
* We must use append mode so output generated by child processes via
@ -177,39 +180,39 @@ parseCommandLine(migratorContext *ctx, int argc, char *argv[])
* start.
*/
/* truncate */
if ((ctx->log_fd = fopen(ctx->logfile, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to log file %s\n", ctx->logfile);
fclose(ctx->log_fd);
if ((ctx->log_fd = fopen(ctx->logfile, "a")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to log file %s\n", ctx->logfile);
if ((log.fd = fopen(log.filename, "w")) == NULL)
pg_log(PG_FATAL, "Cannot write to log file %s\n", log.filename);
fclose(log.fd);
if ((log.fd = fopen(log.filename, "a")) == NULL)
pg_log(PG_FATAL, "Cannot write to log file %s\n", log.filename);
}
else
ctx->logfile = strdup(DEVNULL);
log.filename = strdup(DEVNULL);
/* if no debug file name, output to the terminal */
if (ctx->debug && !ctx->debug_fd)
if (log.debug && !log.debug_fd)
{
ctx->debug_fd = fopen(DEVTTY, "w");
if (!ctx->debug_fd)
pg_log(ctx, PG_FATAL, "Cannot write to terminal\n");
log.debug_fd = fopen(DEVTTY, "w");
if (!log.debug_fd)
pg_log(PG_FATAL, "Cannot write to terminal\n");
}
/* Get values from env if not already set */
validateDirectoryOption(ctx, &ctx->old.pgdata, "OLDDATADIR", "-d",
validateDirectoryOption(&old_cluster.pgdata, "OLDDATADIR", "-d",
"old cluster data resides");
validateDirectoryOption(ctx, &ctx->new.pgdata, "NEWDATADIR", "-D",
validateDirectoryOption(&new_cluster.pgdata, "NEWDATADIR", "-D",
"new cluster data resides");
validateDirectoryOption(ctx, &ctx->old.bindir, "OLDBINDIR", "-b",
validateDirectoryOption(&old_cluster.bindir, "OLDBINDIR", "-b",
"old cluster binaries reside");
validateDirectoryOption(ctx, &ctx->new.bindir, "NEWBINDIR", "-B",
validateDirectoryOption(&new_cluster.bindir, "NEWBINDIR", "-B",
"new cluster binaries reside");
get_pkglibdirs(ctx);
get_pkglibdirs();
}
static void
usage(migratorContext *ctx)
usage(void)
{
printf(_("\nUsage: pg_upgrade [OPTIONS]...\n\
\n\
@ -243,7 +246,7 @@ When you run pg_upgrade, you must provide the following information:\n\
\n\
For example:\n\
pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\
or\n"), ctx->old.port, ctx->new.port, ctx->user);
or\n"), old_cluster.port, new_cluster.port, os_info.user);
#ifndef WIN32
printf(_("\
$ export OLDDATADIR=oldCluster/data\n\
@ -275,7 +278,7 @@ or\n"), ctx->old.port, ctx->new.port, ctx->user);
* user hasn't provided the required directory name.
*/
static void
validateDirectoryOption(migratorContext *ctx, char **dirpath,
validateDirectoryOption(char **dirpath,
char *envVarName, char *cmdLineOption, char *description)
{
if (*dirpath == NULL || (strlen(*dirpath) == 0))
@ -283,10 +286,10 @@ validateDirectoryOption(migratorContext *ctx, char **dirpath,
const char *envVar;
if ((envVar = getenv(envVarName)) && strlen(envVar))
*dirpath = pg_strdup(ctx, envVar);
*dirpath = pg_strdup(envVar);
else
{
pg_log(ctx, PG_FATAL, "You must identify the directory where the %s\n"
pg_log(PG_FATAL, "You must identify the directory where the %s\n"
"Please use the %s command-line option or the %s environment variable\n",
description, cmdLineOption, envVarName);
}
@ -306,15 +309,15 @@ validateDirectoryOption(migratorContext *ctx, char **dirpath,
static void
get_pkglibdirs(migratorContext *ctx)
get_pkglibdirs(void)
{
ctx->old.libpath = get_pkglibdir(ctx, ctx->old.bindir);
ctx->new.libpath = get_pkglibdir(ctx, ctx->new.bindir);
old_cluster.libpath = get_pkglibdir(old_cluster.bindir);
new_cluster.libpath = get_pkglibdir(new_cluster.bindir);
}
static char *
get_pkglibdir(migratorContext *ctx, const char *bindir)
get_pkglibdir(const char *bindir)
{
char cmd[MAXPGPATH];
char bufin[MAX_STRING];
@ -324,7 +327,7 @@ get_pkglibdir(migratorContext *ctx, const char *bindir)
snprintf(cmd, sizeof(cmd), "\"%s/pg_config\" --pkglibdir", bindir);
if ((output = popen(cmd, "r")) == NULL)
pg_log(ctx, PG_FATAL, "Could not get pkglibdir data: %s\n",
pg_log(PG_FATAL, "Could not get pkglibdir data: %s\n",
getErrorText(errno));
fgets(bufin, sizeof(bufin), output);
@ -338,5 +341,5 @@ get_pkglibdir(migratorContext *ctx, const char *bindir)
if (bufin[i] == '\n')
bufin[i] = '\0';
return pg_strdup(ctx, bufin);
return pg_strdup(bufin);
}

View File

@ -15,9 +15,9 @@
#ifdef PAGE_CONVERSION
static const char *getPageVersion(migratorContext *ctx,
static const char *getPageVersion(
uint16 *version, const char *pathName);
static pageCnvCtx *loadConverterPlugin(migratorContext *ctx,
static pageCnvCtx *loadConverterPlugin(
uint16 newPageVersion, uint16 oldPageVersion);
@ -37,7 +37,7 @@ static pageCnvCtx *loadConverterPlugin(migratorContext *ctx,
* string.
*/
const char *
setupPageConverter(migratorContext *ctx, pageCnvCtx **result)
setupPageConverter(pageCnvCtx **result)
{
uint16 oldPageVersion;
uint16 newPageVersion;
@ -46,15 +46,15 @@ setupPageConverter(migratorContext *ctx, pageCnvCtx **result)
char dstName[MAXPGPATH];
char srcName[MAXPGPATH];
snprintf(dstName, sizeof(dstName), "%s/global/%u", ctx->new.pgdata,
ctx->new.pg_database_oid);
snprintf(srcName, sizeof(srcName), "%s/global/%u", ctx->old.pgdata,
ctx->old.pg_database_oid);
snprintf(dstName, sizeof(dstName), "%s/global/%u", new_cluster.pgdata,
new_cluster.pg_database_oid);
snprintf(srcName, sizeof(srcName), "%s/global/%u", old_cluster.pgdata,
old_cluster.pg_database_oid);
if ((msg = getPageVersion(ctx, &oldPageVersion, srcName)) != NULL)
if ((msg = getPageVersion(&oldPageVersion, srcName)) != NULL)
return msg;
if ((msg = getPageVersion(ctx, &newPageVersion, dstName)) != NULL)
if ((msg = getPageVersion(&newPageVersion, dstName)) != NULL)
return msg;
/*
@ -73,7 +73,7 @@ setupPageConverter(migratorContext *ctx, pageCnvCtx **result)
* layout.
*/
if ((converter = loadConverterPlugin(ctx, newPageVersion, oldPageVersion)) == NULL)
if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
return "can't find plugin to convert from old page layout to new page layout";
else
{
@ -93,7 +93,7 @@ setupPageConverter(migratorContext *ctx, pageCnvCtx **result)
* of a null-terminated string).
*/
static const char *
getPageVersion(migratorContext *ctx, uint16 *version, const char *pathName)
getPageVersion(uint16 *version, const char *pathName)
{
int relfd;
PageHeaderData page;
@ -128,7 +128,7 @@ getPageVersion(migratorContext *ctx, uint16 *version, const char *pathName)
* is not found, this function returns NULL.
*/
static pageCnvCtx *
loadConverterPlugin(migratorContext *ctx, uint16 newPageVersion, uint16 oldPageVersion)
loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion)
{
char pluginName[MAXPGPATH];
void *plugin;
@ -151,7 +151,7 @@ loadConverterPlugin(migratorContext *ctx, uint16 newPageVersion, uint16 oldPageV
return NULL;
else
{
pageCnvCtx *result = (pageCnvCtx *) pg_malloc(ctx, sizeof(*result));
pageCnvCtx *result = (pageCnvCtx *) pg_malloc(sizeof(*result));
result->old.PageVersion = oldPageVersion;
result->new.PageVersion = newPageVersion;
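
setupPageConverter only reaches the plugin loader when the two data directories report different page-layout versions; identical versions mean files can be copied or linked untouched. A trivial sketch of that gate with placeholder version numbers:

#include <stdio.h>

/* Sketch of the decision setupPageConverter makes.  The version numbers
 * are placeholders; the real ones are read from each cluster's data files
 * by getPageVersion. */
int
main(void)
{
    unsigned short oldPageVersion = 4;
    unsigned short newPageVersion = 4;

    if (newPageVersion == oldPageVersion)
        printf("page layouts match: files can be copied or linked as-is\n");
    else
        printf("page layouts differ: a conversion plugin must be loaded\n");
    return 0;
}
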

View File

@ -13,66 +13,65 @@
#include <langinfo.h>
#endif
static void disable_old_cluster(migratorContext *ctx);
static void prepare_new_cluster(migratorContext *ctx);
static void prepare_new_databases(migratorContext *ctx);
static void create_new_objects(migratorContext *ctx);
static void copy_clog_xlog_xid(migratorContext *ctx);
static void set_frozenxids(migratorContext *ctx);
static void setup(migratorContext *ctx, char *argv0, bool live_check);
static void cleanup(migratorContext *ctx);
static void disable_old_cluster(void);
static void prepare_new_cluster(void);
static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(void);
static void setup(char *argv0, bool live_check);
static void cleanup(void);
ClusterInfo old_cluster, new_cluster;
OSInfo os_info;
int
main(int argc, char **argv)
{
migratorContext ctx;
char *sequence_script_file_name = NULL;
char *deletion_script_file_name = NULL;
bool live_check = false;
memset(&ctx, 0, sizeof(ctx));
parseCommandLine(argc, argv);
parseCommandLine(&ctx, argc, argv);
output_check_banner(&live_check);
output_check_banner(&ctx, &live_check);
setup(argv[0], live_check);
setup(&ctx, argv[0], live_check);
check_cluster_versions();
check_cluster_compatibility(live_check);
check_cluster_versions(&ctx);
check_cluster_compatibility(&ctx, live_check);
check_old_cluster(&ctx, live_check, &sequence_script_file_name);
check_old_cluster(live_check, &sequence_script_file_name);
/* -- NEW -- */
start_postmaster(&ctx, CLUSTER_NEW, false);
start_postmaster(CLUSTER_NEW, false);
check_new_cluster(&ctx);
report_clusters_compatible(&ctx);
check_new_cluster();
report_clusters_compatible();
pg_log(&ctx, PG_REPORT, "\nPerforming Migration\n");
pg_log(&ctx, PG_REPORT, "--------------------\n");
pg_log(PG_REPORT, "\nPerforming Migration\n");
pg_log(PG_REPORT, "--------------------\n");
disable_old_cluster(&ctx);
prepare_new_cluster(&ctx);
disable_old_cluster();
prepare_new_cluster();
stop_postmaster(&ctx, false, false);
stop_postmaster(false, false);
/*
* Destructive Changes to New Cluster
*/
copy_clog_xlog_xid(&ctx);
copy_clog_xlog_xid();
/* New now using xids of the old system */
prepare_new_databases(&ctx);
prepare_new_databases();
create_new_objects(&ctx);
create_new_objects();
transfer_all_new_dbs(&ctx, &ctx.old.dbarr, &ctx.new.dbarr,
ctx.old.pgdata, ctx.new.pgdata);
transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
old_cluster.pgdata, new_cluster.pgdata);
/*
* Assuming OIDs are only used in system tables, there is no need to
@ -80,32 +79,32 @@ main(int argc, char **argv)
* the old system, but we do it anyway just in case. We do it late here
* because there is no need to have the schema load use new oids.
*/
prep_status(&ctx, "Setting next oid for new cluster");
exec_prog(&ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > "
prep_status("Setting next oid for new cluster");
exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > "
DEVNULL SYSTEMQUOTE,
ctx.new.bindir, ctx.old.controldata.chkpnt_nxtoid, ctx.new.pgdata);
check_ok(&ctx);
new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, new_cluster.pgdata);
check_ok();
create_script_for_old_cluster_deletion(&ctx, &deletion_script_file_name);
create_script_for_old_cluster_deletion(&deletion_script_file_name);
issue_warnings(&ctx, sequence_script_file_name);
issue_warnings(sequence_script_file_name);
pg_log(&ctx, PG_REPORT, "\nUpgrade complete\n");
pg_log(&ctx, PG_REPORT, "----------------\n");
pg_log(PG_REPORT, "\nUpgrade complete\n");
pg_log(PG_REPORT, "----------------\n");
output_completion_banner(&ctx, deletion_script_file_name);
output_completion_banner(deletion_script_file_name);
pg_free(deletion_script_file_name);
pg_free(sequence_script_file_name);
cleanup(&ctx);
cleanup();
return 0;
}
static void
setup(migratorContext *ctx, char *argv0, bool live_check)
setup(char *argv0, bool live_check)
{
char exec_path[MAXPGPATH]; /* full path to my executable */
@ -113,57 +112,57 @@ setup(migratorContext *ctx, char *argv0, bool live_check)
* make sure the user has a clean environment, otherwise, we may confuse
* libpq when we connect to one (or both) of the servers.
*/
check_for_libpq_envvars(ctx);
check_for_libpq_envvars();
verify_directories(ctx);
verify_directories();
/* no postmasters should be running */
if (!live_check && is_server_running(ctx, ctx->old.pgdata))
if (!live_check && is_server_running(old_cluster.pgdata))
{
pg_log(ctx, PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
pg_log(PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
"Please shutdown that postmaster and try again.\n");
}
/* same goes for the new postmaster */
if (is_server_running(ctx, ctx->new.pgdata))
if (is_server_running(new_cluster.pgdata))
{
pg_log(ctx, PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
"Please shutdown that postmaster and try again.\n");
}
/* get path to pg_upgrade executable */
if (find_my_exec(argv0, exec_path) < 0)
pg_log(ctx, PG_FATAL, "Could not get pathname to pg_upgrade: %s\n", getErrorText(errno));
pg_log(PG_FATAL, "Could not get pathname to pg_upgrade: %s\n", getErrorText(errno));
/* Trim off program name and keep just path */
*last_dir_separator(exec_path) = '\0';
canonicalize_path(exec_path);
ctx->exec_path = pg_strdup(ctx, exec_path);
os_info.exec_path = pg_strdup(exec_path);
}
static void
disable_old_cluster(migratorContext *ctx)
disable_old_cluster(void)
{
/* rename pg_control so old server cannot be accidentally started */
rename_old_pg_control(ctx);
rename_old_pg_control();
}
static void
prepare_new_cluster(migratorContext *ctx)
prepare_new_cluster(void)
{
/*
* It would make more sense to freeze after loading the schema, but that
* would cause us to lose the frozenids restored by the load. We use
* --analyze so autovacuum doesn't update statistics later
*/
prep_status(ctx, "Analyzing all rows in the new cluster");
exec_prog(ctx, true,
prep_status("Analyzing all rows in the new cluster");
exec_prog(true,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --analyze >> %s 2>&1" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->logfile);
check_ok(ctx);
new_cluster.bindir, new_cluster.port, os_info.user, log.filename);
check_ok();
/*
* We do freeze after analyze so pg_statistic is also frozen. template0 is
@ -171,22 +170,22 @@ prepare_new_cluster(migratorContext *ctx)
* datfrozenxid and relfrozenxids later to match the new xid counter
* later.
*/
prep_status(ctx, "Freezing all rows on the new cluster");
exec_prog(ctx, true,
prep_status("Freezing all rows on the new cluster");
exec_prog(true,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --freeze >> %s 2>&1" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->logfile);
check_ok(ctx);
new_cluster.bindir, new_cluster.port, os_info.user, log.filename);
check_ok();
get_pg_database_relfilenode(ctx, CLUSTER_NEW);
get_pg_database_relfilenode(CLUSTER_NEW);
}
static void
prepare_new_databases(migratorContext *ctx)
prepare_new_databases(void)
{
/* -- NEW -- */
start_postmaster(ctx, CLUSTER_NEW, false);
start_postmaster(CLUSTER_NEW, false);
/*
* We set autovacuum_freeze_max_age to its maximum value so autovacuum
@ -194,96 +193,96 @@ prepare_new_databases(migratorContext *ctx)
* set.
*/
set_frozenxids(ctx);
set_frozenxids();
/*
* We have to create the databases first so we can create the toast table
* placeholder relfiles.
*/
prep_status(ctx, "Creating databases in the new cluster");
exec_prog(ctx, true,
prep_status("Creating databases in the new cluster");
exec_prog(true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
/* --no-psqlrc prevents AUTOCOMMIT=off */
"--no-psqlrc --port %d --username \"%s\" "
"-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
GLOBALS_DUMP_FILE, ctx->logfile);
check_ok(ctx);
new_cluster.bindir, new_cluster.port, os_info.user, os_info.cwd,
GLOBALS_DUMP_FILE, log.filename);
check_ok();
get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW);
get_db_and_rel_infos(&new_cluster.dbarr, CLUSTER_NEW);
stop_postmaster(ctx, false, false);
stop_postmaster(false, false);
}
static void
create_new_objects(migratorContext *ctx)
create_new_objects(void)
{
/* -- NEW -- */
start_postmaster(ctx, CLUSTER_NEW, false);
start_postmaster(CLUSTER_NEW, false);
install_support_functions(ctx);
install_support_functions();
prep_status(ctx, "Restoring database schema to new cluster");
exec_prog(ctx, true,
prep_status("Restoring database schema to new cluster");
exec_prog(true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
"--no-psqlrc --port %d --username \"%s\" "
"-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
DB_DUMP_FILE, ctx->logfile);
check_ok(ctx);
new_cluster.bindir, new_cluster.port, os_info.user, os_info.cwd,
DB_DUMP_FILE, log.filename);
check_ok();
/* regenerate now that we have db schemas */
dbarr_free(&ctx->new.dbarr);
get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW);
dbarr_free(&new_cluster.dbarr);
get_db_and_rel_infos(&new_cluster.dbarr, CLUSTER_NEW);
uninstall_support_functions(ctx);
uninstall_support_functions();
stop_postmaster(ctx, false, false);
stop_postmaster(false, false);
}
static void
copy_clog_xlog_xid(migratorContext *ctx)
copy_clog_xlog_xid(void)
{
char old_clog_path[MAXPGPATH];
char new_clog_path[MAXPGPATH];
/* copy old commit logs to new data dir */
prep_status(ctx, "Deleting new commit clogs");
prep_status("Deleting new commit clogs");
snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", ctx->old.pgdata);
snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", ctx->new.pgdata);
snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", old_cluster.pgdata);
snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", new_cluster.pgdata);
if (rmtree(new_clog_path, true) != true)
pg_log(ctx, PG_FATAL, "Unable to delete directory %s\n", new_clog_path);
check_ok(ctx);
pg_log(PG_FATAL, "Unable to delete directory %s\n", new_clog_path);
check_ok();
prep_status(ctx, "Copying old commit clogs to new server");
prep_status("Copying old commit clogs to new server");
/* libpgport's copydir() doesn't work in FRONTEND code */
#ifndef WIN32
exec_prog(ctx, true, SYSTEMQUOTE "%s \"%s\" \"%s\"" SYSTEMQUOTE,
exec_prog(true, SYSTEMQUOTE "%s \"%s\" \"%s\"" SYSTEMQUOTE,
"cp -Rf",
#else
/* flags: everything, no confirm, quiet, overwrite read-only */
exec_prog(ctx, true, SYSTEMQUOTE "%s \"%s\" \"%s\\\"" SYSTEMQUOTE,
exec_prog(true, SYSTEMQUOTE "%s \"%s\" \"%s\\\"" SYSTEMQUOTE,
"xcopy /e /y /q /r",
#endif
old_clog_path, new_clog_path);
check_ok(ctx);
check_ok();
/* set the next transaction id of the new cluster */
prep_status(ctx, "Setting next transaction id for new cluster");
exec_prog(ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -f -x %u \"%s\" > " DEVNULL SYSTEMQUOTE,
ctx->new.bindir, ctx->old.controldata.chkpnt_nxtxid, ctx->new.pgdata);
check_ok(ctx);
prep_status("Setting next transaction id for new cluster");
exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -f -x %u \"%s\" > " DEVNULL SYSTEMQUOTE,
new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, new_cluster.pgdata);
check_ok();
/* now reset the wal archives in the new cluster */
prep_status(ctx, "Resetting WAL archives");
exec_prog(ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
ctx->new.bindir, ctx->old.controldata.chkpnt_tli,
ctx->old.controldata.logid, ctx->old.controldata.nxtlogseg,
ctx->new.pgdata, ctx->logfile);
check_ok(ctx);
prep_status("Resetting WAL archives");
exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, old_cluster.controldata.chkpnt_tli,
old_cluster.controldata.logid, old_cluster.controldata.nxtlogseg,
new_cluster.pgdata, log.filename);
check_ok();
}
@ -298,7 +297,7 @@ copy_clog_xlog_xid(migratorContext *ctx)
*/
static
void
set_frozenxids(migratorContext *ctx)
set_frozenxids(void)
{
int dbnum;
PGconn *conn,
@ -308,18 +307,18 @@ set_frozenxids(migratorContext *ctx)
int i_datname;
int i_datallowconn;
prep_status(ctx, "Setting frozenxid counters in new cluster");
prep_status("Setting frozenxid counters in new cluster");
conn_template1 = connectToServer(ctx, "template1", CLUSTER_NEW);
conn_template1 = connectToServer("template1", CLUSTER_NEW);
/* set pg_database.datfrozenxid */
PQclear(executeQueryOrDie(ctx, conn_template1,
PQclear(executeQueryOrDie(conn_template1,
"UPDATE pg_catalog.pg_database "
"SET datfrozenxid = '%u'",
ctx->old.controldata.chkpnt_nxtxid));
old_cluster.controldata.chkpnt_nxtxid));
/* get database names */
dbres = executeQueryOrDie(ctx, conn_template1,
dbres = executeQueryOrDie(conn_template1,
"SELECT datname, datallowconn "
"FROM pg_catalog.pg_database");
@ -340,25 +339,25 @@ set_frozenxids(migratorContext *ctx)
* change datallowconn.
*/
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(ctx, conn_template1,
PQclear(executeQueryOrDie(conn_template1,
"UPDATE pg_catalog.pg_database "
"SET datallowconn = true "
"WHERE datname = '%s'", datname));
conn = connectToServer(ctx, datname, CLUSTER_NEW);
conn = connectToServer(datname, CLUSTER_NEW);
/* set pg_class.relfrozenxid */
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"UPDATE pg_catalog.pg_class "
"SET relfrozenxid = '%u' "
/* only heap and TOAST are vacuumed */
"WHERE relkind IN ('r', 't')",
ctx->old.controldata.chkpnt_nxtxid));
old_cluster.controldata.chkpnt_nxtxid));
PQfinish(conn);
/* Reset datallowconn flag */
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(ctx, conn_template1,
PQclear(executeQueryOrDie(conn_template1,
"UPDATE pg_catalog.pg_database "
"SET datallowconn = false "
"WHERE datname = '%s'", datname));
@ -368,48 +367,48 @@ set_frozenxids(migratorContext *ctx)
PQfinish(conn_template1);
check_ok(ctx);
check_ok();
}
static void
cleanup(migratorContext *ctx)
cleanup(void)
{
int tblnum;
char filename[MAXPGPATH];
for (tblnum = 0; tblnum < ctx->num_tablespaces; tblnum++)
pg_free(ctx->tablespaces[tblnum]);
pg_free(ctx->tablespaces);
for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
pg_free(os_info.tablespaces[tblnum]);
pg_free(os_info.tablespaces);
dbarr_free(&ctx->old.dbarr);
dbarr_free(&ctx->new.dbarr);
pg_free(ctx->logfile);
pg_free(ctx->user);
pg_free(ctx->old.major_version_str);
pg_free(ctx->new.major_version_str);
pg_free(ctx->old.controldata.lc_collate);
pg_free(ctx->new.controldata.lc_collate);
pg_free(ctx->old.controldata.lc_ctype);
pg_free(ctx->new.controldata.lc_ctype);
pg_free(ctx->old.controldata.encoding);
pg_free(ctx->new.controldata.encoding);
pg_free(ctx->old.tablespace_suffix);
pg_free(ctx->new.tablespace_suffix);
dbarr_free(&old_cluster.dbarr);
dbarr_free(&new_cluster.dbarr);
pg_free(log.filename);
pg_free(os_info.user);
pg_free(old_cluster.major_version_str);
pg_free(new_cluster.major_version_str);
pg_free(old_cluster.controldata.lc_collate);
pg_free(new_cluster.controldata.lc_collate);
pg_free(old_cluster.controldata.lc_ctype);
pg_free(new_cluster.controldata.lc_ctype);
pg_free(old_cluster.controldata.encoding);
pg_free(new_cluster.controldata.encoding);
pg_free(old_cluster.tablespace_suffix);
pg_free(new_cluster.tablespace_suffix);
if (ctx->log_fd != NULL)
if (log.fd != NULL)
{
fclose(ctx->log_fd);
ctx->log_fd = NULL;
fclose(log.fd);
log.fd = NULL;
}
if (ctx->debug_fd)
fclose(ctx->debug_fd);
if (log.debug_fd)
fclose(log.debug_fd);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, ALL_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, ALL_DUMP_FILE);
unlink(filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBALS_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, GLOBALS_DUMP_FILE);
unlink(filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, DB_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, DB_DUMP_FILE);
unlink(filename);
}
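
copy_clog_xlog_xid hands several pg_resetxlog invocations to exec_prog, with the globals now supplying the bindir, data directory, and control-file values. A self-contained sketch of how the -f -x command line is assembled; the paths and XID are placeholders, and the real code wraps the string in SYSTEMQUOTE, redirects to DEVNULL, and passes it to exec_prog():

#include <stdio.h>

/* Sketch: the shape of the pg_resetxlog call that carries the old
 * cluster's next transaction id into the new cluster. */
int
main(void)
{
    char         cmd[1024];
    const char  *new_bindir = "/usr/local/pgsql-new/bin";   /* placeholder */
    const char  *new_pgdata = "/usr/local/pgsql-new/data";  /* placeholder */
    unsigned int chkpnt_nxtxid = 3127;                      /* placeholder */

    snprintf(cmd, sizeof(cmd),
             "\"%s/pg_resetxlog\" -f -x %u \"%s\" > /dev/null",
             new_bindir, chkpnt_nxtxid, new_pgdata);

    printf("would run: %s\n", cmd);
    return 0;
}
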

View File

@ -53,6 +53,8 @@
#endif
#define CLUSTERNAME(cluster) ((cluster) == CLUSTER_OLD ? "old" : "new")
#define ACTIVE_CLUSTER(cluster) (((cluster) == CLUSTER_OLD) ? \
&old_cluster : &new_cluster)
#define atooid(x) ((Oid) strtoul((x), NULL, 10))
@ -83,8 +85,8 @@ typedef struct
*/
typedef struct
{
Oid old; /* Relfilenode of the old relation */
Oid new; /* Relfilenode of the new relation */
Oid old_relfilenode; /* Relfilenode of the old relation */
Oid new_relfilenode; /* Relfilenode of the new relation */
char old_file[MAXPGPATH];
char new_file[MAXPGPATH];
char old_nspname[NAMEDATALEN]; /* old name of the namespace */
@ -194,15 +196,34 @@ typedef struct
/*
* migratorContext
*
* We create a migratorContext object to store all of the information
* that we need to migrate a single cluster.
* Log
*/
typedef struct
{
char *filename; /* name of log file (may be /dev/null) */
FILE *fd; /* log FILE */
bool debug; /* TRUE -> log more information */
FILE *debug_fd; /* debug-level log FILE */
bool verbose; /* TRUE -> be verbose in messages */
} Log;
/*
* UserOpts
*/
typedef struct
{
bool check; /* TRUE -> ask user for permission to make
* changes */
transferMode transfer_mode; /* copy files or link them? */
} UserOpts;
/*
* OSInfo
*/
typedef struct
{
ClusterInfo old,
new; /* old and new cluster information */
const char *progname; /* complete pathname for this program */
char *exec_path; /* full path to my executable */
char *user; /* username for clusters */
@ -213,61 +234,55 @@ typedef struct
int num_libraries;
pgpid_t postmasterPID; /* PID of currently running postmaster */
Cluster running_cluster;
char *logfile; /* name of log file (may be /dev/null) */
FILE *log_fd; /* log FILE */
FILE *debug_fd; /* debug-level log FILE */
bool check; /* TRUE -> ask user for permission to make
* changes */
bool verbose; /* TRUE -> be verbose in messages */
bool debug; /* TRUE -> log more information */
transferMode transfer_mode; /* copy files or link them? */
} migratorContext;
} OSInfo;
/*
* Global variables
*/
extern Log log;
extern UserOpts user_opts;
extern ClusterInfo old_cluster, new_cluster;
extern OSInfo os_info;
extern char scandir_file_pattern[];
/* check.c */
void output_check_banner(migratorContext *ctx, bool *live_check);
void check_old_cluster(migratorContext *ctx, bool live_check,
void output_check_banner(bool *live_check);
void check_old_cluster(bool live_check,
char **sequence_script_file_name);
void check_new_cluster(migratorContext *ctx);
void report_clusters_compatible(migratorContext *ctx);
void issue_warnings(migratorContext *ctx,
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(
char *sequence_script_file_name);
void output_completion_banner(migratorContext *ctx,
void output_completion_banner(
char *deletion_script_file_name);
void check_cluster_versions(migratorContext *ctx);
void check_cluster_compatibility(migratorContext *ctx, bool live_check);
void create_script_for_old_cluster_deletion(migratorContext *ctx,
char **deletion_script_file_name);
void check_cluster_versions(void);
void check_cluster_compatibility(bool live_check);
void create_script_for_old_cluster_deletion(char **deletion_script_file_name);
/* controldata.c */
void get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check);
void check_control_data(migratorContext *ctx, ControlData *oldctrl,
void get_control_data(ClusterInfo *cluster, bool live_check);
void check_control_data(ControlData *oldctrl,
ControlData *newctrl);
/* dump.c */
void generate_old_dump(migratorContext *ctx);
void split_old_dump(migratorContext *ctx);
void generate_old_dump(void);
void split_old_dump(void);
/* exec.c */
int exec_prog(migratorContext *ctx, bool throw_error,
int exec_prog(bool throw_error,
const char *cmd,...);
void verify_directories(migratorContext *ctx);
bool is_server_running(migratorContext *ctx, const char *datadir);
void rename_old_pg_control(migratorContext *ctx);
void verify_directories(void);
bool is_server_running(const char *datadir);
void rename_old_pg_control(void);
/* file.c */
@ -296,88 +311,85 @@ typedef struct
pluginShutdown shutdown; /* Pointer to plugin's shutdown function */
} pageCnvCtx;
const char *setupPageConverter(migratorContext *ctx, pageCnvCtx **result);
const char *setupPageConverter(pageCnvCtx **result);
#else
/* dummy */
typedef void *pageCnvCtx;
#endif
int dir_matching_filenames(const struct dirent * scan_ent);
int pg_scandir(migratorContext *ctx, const char *dirname,
int pg_scandir(const char *dirname,
struct dirent *** namelist,
int (*selector) (const struct dirent *));
const char *copyAndUpdateFile(migratorContext *ctx,
const char *copyAndUpdateFile(
pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
const char *linkAndUpdateFile(migratorContext *ctx,
const char *linkAndUpdateFile(
pageCnvCtx *pageConverter, const char *src, const char *dst);
void check_hard_link(migratorContext *ctx);
void check_hard_link(void);
/* function.c */
void install_support_functions(migratorContext *ctx);
void uninstall_support_functions(migratorContext *ctx);
void get_loadable_libraries(migratorContext *ctx);
void check_loadable_libraries(migratorContext *ctx);
void install_support_functions(void);
void uninstall_support_functions(void);
void get_loadable_libraries(void);
void check_loadable_libraries(void);
/* info.c */
FileNameMap *gen_db_file_maps(migratorContext *ctx, DbInfo *old_db,
FileNameMap *gen_db_file_maps(DbInfo *old_db,
DbInfo *new_db, int *nmaps, const char *old_pgdata,
const char *new_pgdata);
void get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr,
void get_db_and_rel_infos(DbInfoArr *db_arr,
Cluster whichCluster);
DbInfo *dbarr_lookup_db(DbInfoArr *db_arr, const char *db_name);
void dbarr_free(DbInfoArr *db_arr);
void print_maps(migratorContext *ctx, FileNameMap *maps, int n,
void print_maps(FileNameMap *maps, int n,
const char *dbName);
/* option.c */
void parseCommandLine(migratorContext *ctx, int argc, char *argv[]);
void parseCommandLine(int argc, char *argv[]);
/* relfilenode.c */
void get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster);
const char *transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
void get_pg_database_relfilenode(Cluster whichCluster);
const char *transfer_all_new_dbs(DbInfoArr *olddb_arr,
DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata);
/* tablespace.c */
void init_tablespaces(migratorContext *ctx);
void init_tablespaces(void);
/* server.c */
PGconn *connectToServer(migratorContext *ctx, const char *db_name,
PGconn *connectToServer(const char *db_name,
Cluster whichCluster);
PGresult *executeQueryOrDie(migratorContext *ctx, PGconn *conn,
PGresult *executeQueryOrDie(PGconn *conn,
const char *fmt,...);
void start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet);
void stop_postmaster(migratorContext *ctx, bool fast, bool quiet);
uint32 get_major_server_version(migratorContext *ctx, char **verstr,
void start_postmaster(Cluster whichCluster, bool quiet);
void stop_postmaster(bool fast, bool quiet);
uint32 get_major_server_version(char **verstr,
Cluster whichCluster);
void check_for_libpq_envvars(migratorContext *ctx);
void check_for_libpq_envvars(void);
/* util.c */
void exit_nicely(migratorContext *ctx, bool need_cleanup);
void *pg_malloc(migratorContext *ctx, int n);
void pg_free(void *p);
char *pg_strdup(migratorContext *ctx, const char *s);
char *quote_identifier(migratorContext *ctx, const char *s);
int get_user_info(migratorContext *ctx, char **user_name);
void check_ok(migratorContext *ctx);
void report_status(migratorContext *ctx, eLogType type, const char *fmt,...);
void pg_log(migratorContext *ctx, eLogType type, char *fmt,...);
void prep_status(migratorContext *ctx, const char *fmt,...);
void check_ok(migratorContext *ctx);
char *pg_strdup(migratorContext *ctx, const char *s);
void *pg_malloc(migratorContext *ctx, int size);
void exit_nicely(bool need_cleanup);
char *quote_identifier(const char *s);
int get_user_info(char **user_name);
void check_ok(void);
void report_status(eLogType type, const char *fmt,...);
void pg_log(eLogType type, char *fmt,...);
void prep_status(const char *fmt,...);
void check_ok(void);
char *pg_strdup(const char *s);
void *pg_malloc(int size);
void pg_free(void *ptr);
const char *getErrorText(int errNum);
unsigned int str2uint(const char *str);
@ -385,20 +397,17 @@ unsigned int str2uint(const char *str);
/* version.c */
void new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx,
void new_9_0_populate_pg_largeobject_metadata(
bool check_mode, Cluster whichCluster);
/* version_old_8_3.c */
void old_8_3_check_for_name_data_type_usage(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_tsquery_usage(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_rebuild_tsvector_tables(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
char *old_8_3_create_sequence_script(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_name_data_type_usage(Cluster whichCluster);
void old_8_3_check_for_tsquery_usage(Cluster whichCluster);
void old_8_3_rebuild_tsvector_tables(bool check_mode,
Cluster whichCluster);
void old_8_3_invalidate_hash_gin_indexes(bool check_mode,
Cluster whichCluster);
void old_8_3_invalidate_bpchar_pattern_ops_indexes(bool check_mode,
Cluster whichCluster);
char *old_8_3_create_sequence_script(Cluster whichCluster);
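
The header now splits the old migratorContext into Log, UserOpts, OSInfo, and the two ClusterInfo globals, declared extern here and defined once in pg_upgrade.c. A reduced, self-contained sketch of that layout and of a function reading the globals instead of taking a context parameter; the struct fields are stand-ins, not the real definitions:

#include <stdio.h>
#include <stdbool.h>

/* Reduced stand-ins for the header's new structures. */
typedef struct
{
    unsigned short port;
    const char *pgdata;
} ClusterInfoSketch;

typedef struct
{
    bool        check;
} UserOptsSketch;

/* In the header these would be extern declarations; one .c file defines them. */
static ClusterInfoSketch old_cluster_sk = {5432, "/srv/pg/old"};
static ClusterInfoSketch new_cluster_sk = {5433, "/srv/pg/new"};
static UserOptsSketch user_opts_sk = {true};

static void
output_check_banner_sk(void)
{
    /* no context parameter: state comes from the globals */
    if (user_opts_sk.check)
        printf("checking old cluster on port %hu (%s), new on port %hu (%s)\n",
               old_cluster_sk.port, old_cluster_sk.pgdata,
               new_cluster_sk.port, new_cluster_sk.pgdata);
}

int
main(void)
{
    output_check_banner_sk();
    return 0;
}
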

View File

@ -13,9 +13,9 @@
#include "access/transam.h"
static void transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
static void transfer_single_new_db(pageCnvCtx *pageConverter,
FileNameMap *maps, int size);
static void transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter,
static void transfer_relfile(pageCnvCtx *pageConverter,
const char *fromfile, const char *tofile,
const char *oldnspname, const char *oldrelname,
const char *newnspname, const char *newrelname);
@ -30,13 +30,13 @@ char scandir_file_pattern[MAXPGPATH];
* physically link the databases.
*/
const char *
transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
transfer_all_new_dbs(DbInfoArr *olddb_arr,
DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata)
{
int dbnum;
const char *msg = NULL;
prep_status(ctx, "Restoring user relation files\n");
prep_status("Restoring user relation files\n");
for (dbnum = 0; dbnum < newdb_arr->ndbs; dbnum++)
{
@ -47,24 +47,24 @@ transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
pageCnvCtx *pageConverter = NULL;
n_maps = 0;
mappings = gen_db_file_maps(ctx, old_db, new_db, &n_maps, old_pgdata,
mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
new_pgdata);
if (n_maps)
{
print_maps(ctx, mappings, n_maps, new_db->db_name);
print_maps(mappings, n_maps, new_db->db_name);
#ifdef PAGE_CONVERSION
msg = setupPageConverter(ctx, &pageConverter);
msg = setupPageConverter(&pageConverter);
#endif
transfer_single_new_db(ctx, pageConverter, mappings, n_maps);
transfer_single_new_db(pageConverter, mappings, n_maps);
pg_free(mappings);
}
}
prep_status(ctx, ""); /* in case nothing printed */
check_ok(ctx);
prep_status(""); /* in case nothing printed */
check_ok();
return msg;
}
@ -77,13 +77,14 @@ transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
* relfilenodes later in the upgrade process.
*/
void
get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster)
get_pg_database_relfilenode(Cluster whichCluster)
{
PGconn *conn = connectToServer(ctx, "template1", whichCluster);
PGconn *conn = connectToServer("template1", whichCluster);
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
PGresult *res;
int i_relfile;
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT c.relname, c.relfilenode "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n "
@ -93,10 +94,7 @@ get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster)
"ORDER BY c.relname");
i_relfile = PQfnumber(res, "relfilenode");
if (whichCluster == CLUSTER_OLD)
ctx->old.pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile));
else
ctx->new.pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile));
active_cluster->pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile));
PQclear(res);
PQfinish(conn);
@ -109,7 +107,7 @@ get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster)
* create links for mappings stored in "maps" array.
*/
static void
transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
transfer_single_new_db(pageCnvCtx *pageConverter,
FileNameMap *maps, int size)
{
int mapnum;
@ -123,36 +121,39 @@ transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
/* Copying files might take some time, so give feedback. */
snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_file, maps[mapnum].old);
snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_file, maps[mapnum].new);
pg_log(ctx, PG_REPORT, OVERWRITE_MESSAGE, old_file);
snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_file,
maps[mapnum].old_relfilenode);
snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_file,
maps[mapnum].new_relfilenode);
pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_file);
/*
* Copy/link the relation file to the new cluster
*/
unlink(new_file);
transfer_relfile(ctx, pageConverter, old_file, new_file,
transfer_relfile(pageConverter, old_file, new_file,
maps[mapnum].old_nspname, maps[mapnum].old_relname,
maps[mapnum].new_nspname, maps[mapnum].new_relname);
/* fsm/vm files added in PG 8.4 */
if (GET_MAJOR_VERSION(ctx->old.major_version) >= 804)
if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
{
/*
* Now copy/link any fsm and vm files, if they exist
*/
snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u_", maps[mapnum].old);
numFiles = pg_scandir(ctx, maps[mapnum].old_file, &namelist, dir_matching_filenames);
snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u_",
maps[mapnum].old_relfilenode);
numFiles = pg_scandir(maps[mapnum].old_file, &namelist, dir_matching_filenames);
while (numFiles--)
{
snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
namelist[numFiles]->d_name);
snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
maps[mapnum].new, strchr(namelist[numFiles]->d_name, '_'));
maps[mapnum].new_relfilenode, strchr(namelist[numFiles]->d_name, '_'));
unlink(new_file);
transfer_relfile(ctx, pageConverter, old_file, new_file,
transfer_relfile(pageConverter, old_file, new_file,
maps[mapnum].old_nspname, maps[mapnum].old_relname,
maps[mapnum].new_nspname, maps[mapnum].new_relname);
@ -169,18 +170,19 @@ transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
* relfilenode.3, ... 'fsm' and 'vm' files use underscores so are not
* copied.
*/
snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u.", maps[mapnum].old);
numFiles = pg_scandir(ctx, maps[mapnum].old_file, &namelist, dir_matching_filenames);
snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u.",
maps[mapnum].old_relfilenode);
numFiles = pg_scandir(maps[mapnum].old_file, &namelist, dir_matching_filenames);
while (numFiles--)
{
snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
namelist[numFiles]->d_name);
snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
maps[mapnum].new, strchr(namelist[numFiles]->d_name, '.'));
maps[mapnum].new_relfilenode, strchr(namelist[numFiles]->d_name, '.'));
unlink(new_file);
transfer_relfile(ctx, pageConverter, old_file, new_file,
transfer_relfile(pageConverter, old_file, new_file,
maps[mapnum].old_nspname, maps[mapnum].old_relname,
maps[mapnum].new_nspname, maps[mapnum].new_relname);
@ -198,30 +200,30 @@ transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
* Copy or link file from old cluster to new one.
*/
static void
transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter, const char *oldfile,
transfer_relfile(pageCnvCtx *pageConverter, const char *oldfile,
const char *newfile, const char *oldnspname, const char *oldrelname,
const char *newnspname, const char *newrelname)
{
const char *msg;
if ((ctx->transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
pg_log(ctx, PG_FATAL, "this migration requires page-by-page conversion, "
if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
pg_log(PG_FATAL, "this migration requires page-by-page conversion, "
"you must use copy-mode instead of link-mode\n");
if (ctx->transfer_mode == TRANSFER_MODE_COPY)
if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
{
pg_log(ctx, PG_INFO, "copying %s to %s\n", oldfile, newfile);
pg_log(PG_INFO, "copying %s to %s\n", oldfile, newfile);
if ((msg = copyAndUpdateFile(ctx, pageConverter, oldfile, newfile, true)) != NULL)
pg_log(ctx, PG_FATAL, "error while copying %s.%s(%s) to %s.%s(%s): %s\n",
if ((msg = copyAndUpdateFile(pageConverter, oldfile, newfile, true)) != NULL)
pg_log(PG_FATAL, "error while copying %s.%s(%s) to %s.%s(%s): %s\n",
oldnspname, oldrelname, oldfile, newnspname, newrelname, newfile, msg);
}
else
{
pg_log(ctx, PG_INFO, "linking %s to %s\n", newfile, oldfile);
pg_log(PG_INFO, "linking %s to %s\n", newfile, oldfile);
if ((msg = linkAndUpdateFile(ctx, pageConverter, oldfile, newfile)) != NULL)
pg_log(ctx, PG_FATAL,
if ((msg = linkAndUpdateFile(pageConverter, oldfile, newfile)) != NULL)
pg_log(PG_FATAL,
"error while creating link from %s.%s(%s) to %s.%s(%s): %s\n",
oldnspname, oldrelname, oldfile, newnspname, newrelname,
newfile, msg);
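
/*
 * Note: the copy loop above fills a global scandir_file_pattern buffer and
 * passes a dir_matching_filenames() callback to pg_scandir(), neither of
 * which appears in this hunk.  The following is only a sketch of what such
 * a filter could look like, based purely on how it is called here; the real
 * helper lives elsewhere in pg_upgrade and may differ.
 */
#include <string.h>
#include <dirent.h>

/* prefix set by the caller, e.g. "16384_" or "16384." (hypothetical values) */
extern char scandir_file_pattern[];

/* return non-zero for directory entries whose names start with the pattern */
static int
dir_matching_filenames(const struct dirent *scan_ent)
{
	return strncmp(scan_ent->d_name, scandir_file_pattern,
				   strlen(scandir_file_pattern)) == 0;
}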

View File

@ -14,8 +14,8 @@
#define STARTUP_WARNING_TRIES 2
static pgpid_t get_postmaster_pid(migratorContext *ctx, const char *datadir);
static bool test_server_conn(migratorContext *ctx, int timeout,
static pgpid_t get_postmaster_pid(const char *datadir);
static bool test_server_conn(int timeout,
Cluster whichCluster);
@ -27,28 +27,28 @@ static bool test_server_conn(migratorContext *ctx, int timeout,
* message and calls exit_nicely() to kill the program.
*/
PGconn *
connectToServer(migratorContext *ctx, const char *db_name,
connectToServer(const char *db_name,
Cluster whichCluster)
{
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
unsigned short port = active_cluster->port;
char connectString[MAXPGPATH];
unsigned short port = (whichCluster == CLUSTER_OLD) ?
ctx->old.port : ctx->new.port;
PGconn *conn;
snprintf(connectString, sizeof(connectString),
"dbname = '%s' user = '%s' port = %d", db_name, ctx->user, port);
"dbname = '%s' user = '%s' port = %d", db_name, os_info.user, port);
conn = PQconnectdb(connectString);
if (conn == NULL || PQstatus(conn) != CONNECTION_OK)
{
pg_log(ctx, PG_REPORT, "Connection to database failed: %s\n",
pg_log(PG_REPORT, "Connection to database failed: %s\n",
PQerrorMessage(conn));
if (conn)
PQfinish(conn);
exit_nicely(ctx, true);
exit_nicely(true);
}
return conn;
@ -63,7 +63,7 @@ connectToServer(migratorContext *ctx, const char *db_name,
* message and calls exit_nicely() to kill the program.
*/
PGresult *
executeQueryOrDie(migratorContext *ctx, PGconn *conn, const char *fmt,...)
executeQueryOrDie(PGconn *conn, const char *fmt,...)
{
static char command[8192];
va_list args;
@ -74,17 +74,17 @@ executeQueryOrDie(migratorContext *ctx, PGconn *conn, const char *fmt,...)
vsnprintf(command, sizeof(command), fmt, args);
va_end(args);
pg_log(ctx, PG_DEBUG, "executing: %s\n", command);
pg_log(PG_DEBUG, "executing: %s\n", command);
result = PQexec(conn, command);
status = PQresultStatus(result);
if ((status != PGRES_TUPLES_OK) && (status != PGRES_COMMAND_OK))
{
pg_log(ctx, PG_REPORT, "DB command failed\n%s\n%s\n", command,
pg_log(PG_REPORT, "DB command failed\n%s\n%s\n", command,
PQerrorMessage(conn));
PQclear(result);
PQfinish(conn);
exit_nicely(ctx, true);
exit_nicely(true);
return NULL; /* Never get here, but keeps compiler happy */
}
else
@ -99,7 +99,7 @@ executeQueryOrDie(migratorContext *ctx, PGconn *conn, const char *fmt,...)
* from the postmaster.pid file
*/
static pgpid_t
get_postmaster_pid(migratorContext *ctx, const char *datadir)
get_postmaster_pid(const char *datadir)
{
FILE *pidf;
long pid;
@ -114,8 +114,8 @@ get_postmaster_pid(migratorContext *ctx, const char *datadir)
if (fscanf(pidf, "%ld", &pid) != 1)
{
fclose(pidf);
pg_log(ctx, PG_FATAL, "%s: invalid data in PID file \"%s\"\n",
ctx->progname, pid_file);
pg_log(PG_FATAL, "%s: invalid data in PID file \"%s\"\n",
os_info.progname, pid_file);
}
fclose(pidf);
@ -132,16 +132,16 @@ get_postmaster_pid(migratorContext *ctx, const char *datadir)
* is retrieved by reading the PG_VERSION file.
*/
uint32
get_major_server_version(migratorContext *ctx, char **verstr, Cluster whichCluster)
get_major_server_version(char **verstr, Cluster whichCluster)
{
const char *datadir = whichCluster == CLUSTER_OLD ?
ctx->old.pgdata : ctx->new.pgdata;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
const char *datadir = active_cluster->pgdata;
FILE *version_fd;
char ver_file[MAXPGPATH];
int integer_version = 0;
int fractional_version = 0;
*verstr = pg_malloc(ctx, 64);
*verstr = pg_malloc(64);
snprintf(ver_file, sizeof(ver_file), "%s/PG_VERSION", datadir);
if ((version_fd = fopen(ver_file, "r")) == NULL)
@ -150,7 +150,7 @@ get_major_server_version(migratorContext *ctx, char **verstr, Cluster whichClust
if (fscanf(version_fd, "%63s", *verstr) == 0 ||
sscanf(*verstr, "%d.%d", &integer_version, &fractional_version) != 2)
{
pg_log(ctx, PG_FATAL, "could not get version from %s\n", datadir);
pg_log(PG_FATAL, "could not get version from %s\n", datadir);
fclose(version_fd);
return 0;
}
@ -160,25 +160,17 @@ get_major_server_version(migratorContext *ctx, char **verstr, Cluster whichClust
void
start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet)
start_postmaster(Cluster whichCluster, bool quiet)
{
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
char cmd[MAXPGPATH];
const char *bindir;
const char *datadir;
unsigned short port;
if (whichCluster == CLUSTER_OLD)
{
bindir = ctx->old.bindir;
datadir = ctx->old.pgdata;
port = ctx->old.port;
}
else
{
bindir = ctx->new.bindir;
datadir = ctx->new.pgdata;
port = ctx->new.port;
}
bindir = active_cluster->bindir;
datadir = active_cluster->pgdata;
port = active_cluster->port;
/*
* On Win32, we can't send both pg_upgrade output and pg_ctl output to the
@ -193,40 +185,40 @@ start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet)
"start >> \"%s\" 2>&1" SYSTEMQUOTE,
bindir,
#ifndef WIN32
ctx->logfile, datadir, port, ctx->logfile);
log.filename, datadir, port, log.filename);
#else
DEVNULL, datadir, port, DEVNULL);
#endif
exec_prog(ctx, true, "%s", cmd);
exec_prog(true, "%s", cmd);
/* wait for the server to start properly */
if (test_server_conn(ctx, POSTMASTER_UPTIME, whichCluster) == false)
pg_log(ctx, PG_FATAL, " Unable to start %s postmaster with the command: %s\nPerhaps pg_hba.conf was not set to \"trust\".",
if (test_server_conn(POSTMASTER_UPTIME, whichCluster) == false)
pg_log(PG_FATAL, " Unable to start %s postmaster with the command: %s\nPerhaps pg_hba.conf was not set to \"trust\".",
CLUSTERNAME(whichCluster), cmd);
if ((ctx->postmasterPID = get_postmaster_pid(ctx, datadir)) == 0)
pg_log(ctx, PG_FATAL, " Unable to get postmaster pid\n");
ctx->running_cluster = whichCluster;
if ((os_info.postmasterPID = get_postmaster_pid(datadir)) == 0)
pg_log(PG_FATAL, " Unable to get postmaster pid\n");
os_info.running_cluster = whichCluster;
}
void
stop_postmaster(migratorContext *ctx, bool fast, bool quiet)
stop_postmaster(bool fast, bool quiet)
{
char cmd[MAXPGPATH];
const char *bindir;
const char *datadir;
if (ctx->running_cluster == CLUSTER_OLD)
if (os_info.running_cluster == CLUSTER_OLD)
{
bindir = ctx->old.bindir;
datadir = ctx->old.pgdata;
bindir = old_cluster.bindir;
datadir = old_cluster.pgdata;
}
else if (ctx->running_cluster == CLUSTER_NEW)
else if (os_info.running_cluster == CLUSTER_NEW)
{
bindir = ctx->new.bindir;
datadir = ctx->new.pgdata;
bindir = new_cluster.bindir;
datadir = new_cluster.pgdata;
}
else
return; /* no cluster running */
@ -237,14 +229,14 @@ stop_postmaster(migratorContext *ctx, bool fast, bool quiet)
"\"%s\" 2>&1" SYSTEMQUOTE,
bindir,
#ifndef WIN32
ctx->logfile, datadir, fast ? "-m fast" : "", ctx->logfile);
log.filename, datadir, fast ? "-m fast" : "", log.filename);
#else
DEVNULL, datadir, fast ? "-m fast" : "", DEVNULL);
#endif
exec_prog(ctx, fast ? false : true, "%s", cmd);
exec_prog(fast ? false : true, "%s", cmd);
ctx->postmasterPID = 0;
ctx->running_cluster = NONE;
os_info.postmasterPID = 0;
os_info.running_cluster = NONE;
}
@ -258,17 +250,17 @@ stop_postmaster(migratorContext *ctx, bool fast, bool quiet)
* Returns true if the connection attempt was successful, false otherwise.
*/
static bool
test_server_conn(migratorContext *ctx, int timeout, Cluster whichCluster)
test_server_conn(int timeout, Cluster whichCluster)
{
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
unsigned short port = active_cluster->port;
PGconn *conn = NULL;
char con_opts[MAX_STRING];
int tries;
unsigned short port = (whichCluster == CLUSTER_OLD) ?
ctx->old.port : ctx->new.port;
bool ret = false;
snprintf(con_opts, sizeof(con_opts),
"dbname = 'template1' user = '%s' port = %d ", ctx->user, port);
"dbname = 'template1' user = '%s' port = %d ", os_info.user, port);
for (tries = 0; tries < timeout; tries++)
{
@ -282,14 +274,14 @@ test_server_conn(migratorContext *ctx, int timeout, Cluster whichCluster)
}
if (tries == STARTUP_WARNING_TRIES)
prep_status(ctx, "Trying to start %s server ",
prep_status("Trying to start %s server ",
CLUSTERNAME(whichCluster));
else if (tries > STARTUP_WARNING_TRIES)
pg_log(ctx, PG_REPORT, ".");
pg_log(PG_REPORT, ".");
}
if (tries > STARTUP_WARNING_TRIES)
check_ok(ctx);
check_ok();
return ret;
}
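
/*
 * The ACTIVE_CLUSTER() and CLUSTERNAME() macros used above come from
 * pg_upgrade.h and are not shown in this diff.  Judging only from how they
 * are called here, they resolve a Cluster value to the matching global
 * structure or to a printable name.  A hedged sketch of what such
 * declarations could look like; the exact definitions in the header may
 * differ.
 */
typedef enum
{
	NONE = 0,					/* used for os_info.running_cluster */
	CLUSTER_OLD,
	CLUSTER_NEW
} Cluster;

/* old_cluster and new_cluster are the new file-scope globals this commit uses */
#define ACTIVE_CLUSTER(cluster)	(((cluster) == CLUSTER_OLD) ? \
								 &old_cluster : &new_cluster)
#define CLUSTERNAME(cluster)	(((cluster) == CLUSTER_OLD) ? "old" : "new")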
@ -305,7 +297,7 @@ test_server_conn(migratorContext *ctx, int timeout, Cluster whichCluster)
* If any are found, will log them and cancel.
*/
void
check_for_libpq_envvars(migratorContext *ctx)
check_for_libpq_envvars(void)
{
PQconninfoOption *option;
PQconninfoOption *start;
@ -323,7 +315,7 @@ check_for_libpq_envvars(migratorContext *ctx)
{
found = true;
pg_log(ctx, PG_WARNING,
pg_log(PG_WARNING,
"libpq env var %-20s is currently set to: %s\n", option->envvar, value);
}
@ -334,6 +326,6 @@ check_for_libpq_envvars(migratorContext *ctx)
PQconninfoFree(start);
if (found)
pg_log(ctx, PG_FATAL,
pg_log(PG_FATAL,
"libpq env vars have been found and listed above, please unset them for pg_upgrade\n");
}
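
/*
 * Taken together, the reworked helpers in this file drop the ctx parameter
 * and read the global cluster structures and os_info instead.  A minimal
 * usage sketch, not part of the patch: the function name and the query are
 * made up for illustration only.
 */
#include "pg_upgrade.h"

static void
report_old_database_count(void)
{
	PGconn	   *conn = connectToServer("template1", CLUSTER_OLD);
	PGresult   *res = executeQueryOrDie(conn,
							"SELECT count(*) FROM pg_catalog.pg_database");

	pg_log(PG_REPORT, "old cluster has %s databases\n",
		   PQgetvalue(res, 0, 0));

	PQclear(res);
	PQfinish(conn);
}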

View File

@ -9,22 +9,22 @@
#include "pg_upgrade.h"
static void get_tablespace_paths(migratorContext *ctx);
static void set_tablespace_directory_suffix(migratorContext *ctx,
static void get_tablespace_paths(void);
static void set_tablespace_directory_suffix(
Cluster whichCluster);
void
init_tablespaces(migratorContext *ctx)
init_tablespaces(void)
{
get_tablespace_paths(ctx);
get_tablespace_paths();
set_tablespace_directory_suffix(ctx, CLUSTER_OLD);
set_tablespace_directory_suffix(ctx, CLUSTER_NEW);
set_tablespace_directory_suffix(CLUSTER_OLD);
set_tablespace_directory_suffix(CLUSTER_NEW);
if (ctx->num_tablespaces > 0 &&
strcmp(ctx->old.tablespace_suffix, ctx->new.tablespace_suffix) == 0)
pg_log(ctx, PG_FATAL,
if (os_info.num_tablespaces > 0 &&
strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
pg_log(PG_FATAL,
"Cannot migrate to/from the same system catalog version when\n"
"using tablespaces.\n");
}
@ -37,29 +37,29 @@ init_tablespaces(migratorContext *ctx)
* paths. It's the caller's responsibility to free the array.
*/
static void
get_tablespace_paths(migratorContext *ctx)
get_tablespace_paths(void)
{
PGconn *conn = connectToServer(ctx, "template1", CLUSTER_OLD);
PGconn *conn = connectToServer("template1", CLUSTER_OLD);
PGresult *res;
int tblnum;
int i_spclocation;
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT spclocation "
"FROM pg_catalog.pg_tablespace "
"WHERE spcname != 'pg_default' AND "
" spcname != 'pg_global'");
if ((ctx->num_tablespaces = PQntuples(res)) != 0)
ctx->tablespaces = (char **) pg_malloc(ctx,
ctx->num_tablespaces * sizeof(char *));
if ((os_info.num_tablespaces = PQntuples(res)) != 0)
os_info.tablespaces = (char **) pg_malloc(
os_info.num_tablespaces * sizeof(char *));
else
ctx->tablespaces = NULL;
os_info.tablespaces = NULL;
i_spclocation = PQfnumber(res, "spclocation");
for (tblnum = 0; tblnum < ctx->num_tablespaces; tblnum++)
ctx->tablespaces[tblnum] = pg_strdup(ctx,
for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
os_info.tablespaces[tblnum] = pg_strdup(
PQgetvalue(res, tblnum, i_spclocation));
PQclear(res);
@ -71,20 +71,21 @@ get_tablespace_paths(migratorContext *ctx)
static void
set_tablespace_directory_suffix(migratorContext *ctx, Cluster whichCluster)
set_tablespace_directory_suffix(Cluster whichCluster)
{
ClusterInfo *cluster = (whichCluster == CLUSTER_OLD) ? &ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
if (GET_MAJOR_VERSION(cluster->major_version) <= 804)
cluster->tablespace_suffix = pg_strdup(ctx, "");
if (GET_MAJOR_VERSION(active_cluster->major_version) <= 804)
active_cluster->tablespace_suffix = pg_strdup("");
else
{
/* This cluster has a version-specific subdirectory */
cluster->tablespace_suffix = pg_malloc(ctx, 4 + strlen(cluster->major_version_str) +
10 /* OIDCHARS */ + 1);
active_cluster->tablespace_suffix = pg_malloc(4 +
strlen(active_cluster->major_version_str) +
10 /* OIDCHARS */ + 1);
/* The leading slash is needed to start a new directory. */
sprintf(cluster->tablespace_suffix, "/PG_%s_%d", cluster->major_version_str,
cluster->controldata.cat_ver);
sprintf(active_cluster->tablespace_suffix, "/PG_%s_%d", active_cluster->major_version_str,
active_cluster->controldata.cat_ver);
}
}
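
/*
 * The suffix built here is what later distinguishes old and new data inside
 * a shared tablespace directory (hence the fatal error above when both
 * suffixes are identical).  Illustration only, assuming GET_MAJOR_VERSION()
 * simply strips the minor part of a version number such as 80400 -> 804:
 * a full per-tablespace path would be composed roughly as below.  The helper
 * name and the sample directories are hypothetical, not part of the patch.
 */
#include "pg_upgrade.h"

static void
show_old_tablespace_paths(void)
{
	int			tblnum;
	char		path[MAXPGPATH];

	for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
	{
		/* e.g. "/mnt/ts1" + "" (pre-8.4) or "/mnt/ts1" + "/PG_9.0_<catver>" */
		snprintf(path, sizeof(path), "%s%s",
				 os_info.tablespaces[tblnum], old_cluster.tablespace_suffix);
		pg_log(PG_INFO, "old tablespace directory: %s\n", path);
	}
}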

View File

@ -12,13 +12,15 @@
#include <signal.h>
Log log;
/*
* report_status()
*
* Displays the result of an operation (ok, failed, error message,...)
*/
void
report_status(migratorContext *ctx, eLogType type, const char *fmt,...)
report_status(eLogType type, const char *fmt,...)
{
va_list args;
char message[MAX_STRING];
@ -27,27 +29,27 @@ report_status(migratorContext *ctx, eLogType type, const char *fmt,...)
vsnprintf(message, sizeof(message), fmt, args);
va_end(args);
pg_log(ctx, type, "%s\n", message);
pg_log(type, "%s\n", message);
}
/*
* prep_status(&ctx, )
* prep_status
*
* Displays a message that describes an operation we are about to begin.
* We pad the message out to MESSAGE_WIDTH characters so that all of the "ok" and
* "failed" indicators line up nicely.
*
* A typical sequence would look like this:
* prep_status(&ctx, "about to flarb the next %d files", fileCount );
* prep_status("about to flarb the next %d files", fileCount );
*
* if(( message = flarbFiles(fileCount)) == NULL)
* report_status(ctx, PG_REPORT, "ok" );
* report_status(PG_REPORT, "ok" );
* else
* pg_log(ctx, PG_FATAL, "failed - %s", message );
* pg_log(PG_FATAL, "failed - %s", message );
*/
void
prep_status(migratorContext *ctx, const char *fmt,...)
prep_status(const char *fmt,...)
{
va_list args;
char message[MAX_STRING];
@ -57,14 +59,14 @@ prep_status(migratorContext *ctx, const char *fmt,...)
va_end(args);
if (strlen(message) > 0 && message[strlen(message) - 1] == '\n')
pg_log(ctx, PG_REPORT, "%s", message);
pg_log(PG_REPORT, "%s", message);
else
pg_log(ctx, PG_REPORT, "%-" MESSAGE_WIDTH "s", message);
pg_log(PG_REPORT, "%-" MESSAGE_WIDTH "s", message);
}
void
pg_log(migratorContext *ctx, eLogType type, char *fmt,...)
pg_log(eLogType type, char *fmt,...)
{
va_list args;
char message[MAX_STRING];
@ -73,19 +75,19 @@ pg_log(migratorContext *ctx, eLogType type, char *fmt,...)
vsnprintf(message, sizeof(message), fmt, args);
va_end(args);
if (ctx->log_fd != NULL)
if (log.fd != NULL)
{
fwrite(message, strlen(message), 1, ctx->log_fd);
fwrite(message, strlen(message), 1, log.fd);
/* if we are using OVERWRITE_MESSAGE, add newline */
if (strchr(message, '\r') != NULL)
fwrite("\n", 1, 1, ctx->log_fd);
fflush(ctx->log_fd);
fwrite("\n", 1, 1, log.fd);
fflush(log.fd);
}
switch (type)
{
case PG_INFO:
if (ctx->verbose)
if (log.verbose)
printf("%s", _(message));
break;
@ -97,12 +99,12 @@ pg_log(migratorContext *ctx, eLogType type, char *fmt,...)
case PG_FATAL:
printf("%s", "\n");
printf("%s", _(message));
exit_nicely(ctx, true);
exit_nicely(true);
break;
case PG_DEBUG:
if (ctx->debug)
fprintf(ctx->debug_fd, "%s\n", _(message));
if (log.debug)
fprintf(log.debug_fd, "%s\n", _(message));
break;
default:
@ -113,10 +115,10 @@ pg_log(migratorContext *ctx, eLogType type, char *fmt,...)
void
check_ok(migratorContext *ctx)
check_ok(void)
{
/* all seems well */
report_status(ctx, PG_REPORT, "ok");
report_status(PG_REPORT, "ok");
fflush(stdout);
}
@ -129,9 +131,9 @@ check_ok(migratorContext *ctx)
* memory leakage is not a big deal in this program.
*/
char *
quote_identifier(migratorContext *ctx, const char *s)
quote_identifier(const char *s)
{
char *result = pg_malloc(ctx, strlen(s) * 2 + 3);
char *result = pg_malloc(strlen(s) * 2 + 3);
char *r = result;
*r++ = '"';
@ -154,7 +156,7 @@ quote_identifier(migratorContext *ctx, const char *s)
* (copied from initdb.c) find the current user
*/
int
get_user_info(migratorContext *ctx, char **user_name)
get_user_info(char **user_name)
{
int user_id;
@ -176,28 +178,28 @@ get_user_info(migratorContext *ctx, char **user_name)
user_id = 1;
#endif
*user_name = pg_strdup(ctx, pw->pw_name);
*user_name = pg_strdup(pw->pw_name);
return user_id;
}
void
exit_nicely(migratorContext *ctx, bool need_cleanup)
exit_nicely(bool need_cleanup)
{
stop_postmaster(ctx, true, true);
stop_postmaster(true, true);
pg_free(ctx->logfile);
pg_free(log.filename);
if (ctx->log_fd)
fclose(ctx->log_fd);
if (log.fd)
fclose(log.fd);
if (ctx->debug_fd)
fclose(ctx->debug_fd);
if (log.debug_fd)
fclose(log.debug_fd);
/* terminate any running instance of postmaster */
if (ctx->postmasterPID != 0)
kill(ctx->postmasterPID, SIGTERM);
if (os_info.postmasterPID != 0)
kill(os_info.postmasterPID, SIGTERM);
if (need_cleanup)
{
@ -212,12 +214,12 @@ exit_nicely(migratorContext *ctx, bool need_cleanup)
void *
pg_malloc(migratorContext *ctx, int n)
pg_malloc(int n)
{
void *p = malloc(n);
if (p == NULL)
pg_log(ctx, PG_FATAL, "%s: out of memory\n", ctx->progname);
pg_log(PG_FATAL, "%s: out of memory\n", os_info.progname);
return p;
}
@ -232,12 +234,12 @@ pg_free(void *p)
char *
pg_strdup(migratorContext *ctx, const char *s)
pg_strdup(const char *s)
{
char *result = strdup(s);
if (result == NULL)
pg_log(ctx, PG_FATAL, "%s: out of memory\n", ctx->progname);
pg_log(PG_FATAL, "%s: out of memory\n", os_info.progname);
return result;
}
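
/*
 * The file-scope "Log log;" added above is a new global whose type is
 * declared in pg_upgrade.h, outside this diff.  Based solely on the members
 * referenced by pg_log() and exit_nicely() here, the structure carries at
 * least the fields below; treat this as a reconstruction, not the
 * authoritative definition.
 */
#include <stdio.h>
#include <stdbool.h>

typedef struct
{
	char	   *filename;		/* log file name; pg_free()d in exit_nicely() */
	FILE	   *fd;				/* open log file, or NULL */
	bool		debug;			/* PG_DEBUG messages enabled? */
	FILE	   *debug_fd;		/* where PG_DEBUG messages go */
	bool		verbose;		/* print PG_INFO messages to stdout? */
} Log;

extern Log	log;				/* defined at the top of this file */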

View File

@ -18,30 +18,29 @@
* 9.0 has a new pg_largeobject permission table
*/
void
new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
new_9_0_populate_pg_largeobject_metadata(bool check_mode,
Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for large objects");
prep_status("Checking for large objects");
snprintf(output_path, sizeof(output_path), "%s/pg_largeobject.sql",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
PGresult *res;
int i_count;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* find if there are any large objects */
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT count(*) "
"FROM pg_catalog.pg_largeobject ");
@ -52,9 +51,9 @@ new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
if (!check_mode)
{
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
fprintf(script, "\\connect %s\n",
quote_identifier(ctx, active_db->db_name));
quote_identifier(active_db->db_name));
fprintf(script,
"SELECT pg_catalog.lo_create(t.loid)\n"
"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
@ -69,16 +68,16 @@ new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
{
if (!check_mode)
fclose(script);
report_status(ctx, PG_WARNING, "warning");
report_status(PG_WARNING, "warning");
if (check_mode)
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains large objects.\n"
"| The new database has an additional large object\n"
"| permission table. After migration, you will be\n"
"| given a command to populate the pg_largeobject\n"
"| permission table with default permissions.\n\n");
else
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains large objects.\n"
"| The new database has an additional large object\n"
"| permission table so default permissions must be\n"
@ -89,5 +88,5 @@ new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
output_path);
}
else
check_ok(ctx);
check_ok();
}

View File

@ -19,19 +19,18 @@
* checks tables and indexes.
*/
void
old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluster)
old_8_3_check_for_name_data_type_usage(Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for invalid 'name' user columns");
prep_status("Checking for invalid 'name' user columns");
snprintf(output_path, sizeof(output_path), "%s/tables_using_name.txt",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -43,7 +42,7 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
i_relname,
i_attname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/*
* With a smaller alignment in 8.4, 'name' cannot be used in a
@ -51,7 +50,7 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
* that condition with enough analysis, but it seems not worth the
* trouble.)
*/
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
@ -72,7 +71,7 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
{
found = true;
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@ -92,8 +91,8 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
if (found)
{
fclose(script);
pg_log(ctx, PG_REPORT, "fatal\n");
pg_log(ctx, PG_FATAL,
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"| Your installation contains the \"name\" data type in\n"
"| user tables. This data type changed its internal\n"
"| alignment between your old and new clusters so this\n"
@ -103,7 +102,7 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
"| \t%s\n\n", output_path);
}
else
check_ok(ctx);
check_ok();
}
@ -114,19 +113,18 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
* so migration of such fields is impossible.
*/
void
old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
old_8_3_check_for_tsquery_usage(Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for tsquery user columns");
prep_status("Checking for tsquery user columns");
snprintf(output_path, sizeof(output_path), "%s/tables_using_tsquery.txt",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -138,10 +136,10 @@ old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
i_relname,
i_attname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* Find any user-defined tsquery columns */
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
@ -162,7 +160,7 @@ old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
{
found = true;
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@ -182,8 +180,8 @@ old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
if (found)
{
fclose(script);
pg_log(ctx, PG_REPORT, "fatal\n");
pg_log(ctx, PG_FATAL,
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"| Your installation contains the \"tsquery\" data type.\n"
"| This data type added a new internal field between\n"
"| your old and new clusters so this cluster cannot\n"
@ -193,7 +191,7 @@ old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
"| \t%s\n\n", output_path);
}
else
check_ok(ctx);
check_ok();
}
@ -210,20 +208,19 @@ old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
* 'c' 'bb' 'aaa' -- 8.3
*/
void
old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
old_8_3_rebuild_tsvector_tables(bool check_mode,
Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for tsvector user columns");
prep_status("Checking for tsvector user columns");
snprintf(output_path, sizeof(output_path), "%s/rebuild_tsvector_tables.sql",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -237,10 +234,10 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
i_relname,
i_attname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* Find any user-defined tsvector columns */
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
@ -281,11 +278,11 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
if (!check_mode)
{
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "\\connect %s\n\n",
quote_identifier(ctx, active_db->db_name));
quote_identifier(active_db->db_name));
db_used = true;
}
@ -296,8 +293,8 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
if (strlen(old_nspname) != 0 || strlen(old_relname) != 0)
fprintf(script, ";\n\n");
fprintf(script, "ALTER TABLE %s.%s\n",
quote_identifier(ctx, PQgetvalue(res, rowno, i_nspname)),
quote_identifier(ctx, PQgetvalue(res, rowno, i_relname)));
quote_identifier(PQgetvalue(res, rowno, i_nspname)),
quote_identifier(PQgetvalue(res, rowno, i_relname)));
}
else
fprintf(script, ",\n");
@ -307,8 +304,8 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
fprintf(script, "ALTER COLUMN %s "
/* This could have been a custom conversion function call. */
"TYPE pg_catalog.tsvector USING %s::pg_catalog.text::pg_catalog.tsvector",
quote_identifier(ctx, PQgetvalue(res, rowno, i_attname)),
quote_identifier(ctx, PQgetvalue(res, rowno, i_attname)));
quote_identifier(PQgetvalue(res, rowno, i_attname)),
quote_identifier(PQgetvalue(res, rowno, i_attname)));
}
}
if (strlen(old_nspname) != 0 || strlen(old_relname) != 0)
@ -325,16 +322,16 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
{
if (!check_mode)
fclose(script);
report_status(ctx, PG_WARNING, "warning");
report_status(PG_WARNING, "warning");
if (check_mode)
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains tsvector columns.\n"
"| The tsvector internal storage format changed\n"
"| between your old and new clusters so the tables\n"
"| must be rebuilt. After migration, you will be\n"
"| given instructions.\n\n");
else
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains tsvector columns.\n"
"| The tsvector internal storage format changed\n"
"| between your old and new clusters so the tables\n"
@ -345,7 +342,7 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
output_path);
}
else
check_ok(ctx);
check_ok();
}
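
/*
 * The script generation above groups all tsvector columns of a table into a
 * single ALTER TABLE statement, tracking the previously emitted table to
 * decide between starting a new statement and appending another
 * comma-separated ALTER COLUMN clause.  A standalone sketch of that grouping
 * idea only; the schema, table, and column names are entirely hypothetical.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* pretend query result: (schema, table, column), ordered by table */
	const char *rows[][3] = {
		{"public", "docs", "body_tsv"},
		{"public", "docs", "title_tsv"},
		{"public", "notes", "note_tsv"}
	};
	char		prev_table[64] = "";
	int			rowno;

	for (rowno = 0; rowno < 3; rowno++)
	{
		if (strcmp(prev_table, rows[rowno][1]) != 0)
		{
			/* new table: close the previous statement and start another */
			if (prev_table[0] != '\0')
				printf(";\n\n");
			printf("ALTER TABLE %s.%s\n", rows[rowno][0], rows[rowno][1]);
			strcpy(prev_table, rows[rowno][1]);
		}
		else
			printf(",\n");

		printf("\tALTER COLUMN %s "
			   "TYPE pg_catalog.tsvector USING %s::pg_catalog.text::pg_catalog.tsvector",
			   rows[rowno][2], rows[rowno][2]);
	}
	printf(";\n");
	return 0;
}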
@ -355,20 +352,19 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
* Hash, Gin, and GiST index binary formats changed between 8.3 and 8.4
*/
void
old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
old_8_3_invalidate_hash_gin_indexes(bool check_mode,
Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for hash and gin indexes");
prep_status("Checking for hash and gin indexes");
snprintf(output_path, sizeof(output_path), "%s/reindex_hash_and_gin.sql",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -379,10 +375,10 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
int i_nspname,
i_relname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* find hash and gin indexes */
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_index i, "
@ -403,16 +399,16 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
if (!check_mode)
{
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "\\connect %s\n",
quote_identifier(ctx, active_db->db_name));
quote_identifier(active_db->db_name));
db_used = true;
}
fprintf(script, "REINDEX INDEX %s.%s;\n",
quote_identifier(ctx, PQgetvalue(res, rowno, i_nspname)),
quote_identifier(ctx, PQgetvalue(res, rowno, i_relname)));
quote_identifier(PQgetvalue(res, rowno, i_nspname)),
quote_identifier(PQgetvalue(res, rowno, i_relname)));
}
}
@ -420,7 +416,7 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
if (!check_mode && found)
/* mark hash and gin indexes as invalid */
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"UPDATE pg_catalog.pg_index i "
"SET indisvalid = false "
"FROM pg_catalog.pg_class c, "
@ -438,9 +434,9 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
{
if (!check_mode)
fclose(script);
report_status(ctx, PG_WARNING, "warning");
report_status(PG_WARNING, "warning");
if (check_mode)
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains hash and/or gin\n"
"| indexes. These indexes have different\n"
"| internal formats between your old and new\n"
@ -448,7 +444,7 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
"| REINDEX command. After migration, you will\n"
"| be given REINDEX instructions.\n\n");
else
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains hash and/or gin\n"
"| indexes. These indexes have different internal\n"
"| formats between your old and new clusters so\n"
@ -461,7 +457,7 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
output_path);
}
else
check_ok(ctx);
check_ok();
}
@ -471,20 +467,19 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
* 8.4 bpchar_pattern_ops no longer sorts based on trailing spaces
*/
void
old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_mode,
old_8_3_invalidate_bpchar_pattern_ops_indexes(bool check_mode,
Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status(ctx, "Checking for bpchar_pattern_ops indexes");
prep_status("Checking for bpchar_pattern_ops indexes");
snprintf(output_path, sizeof(output_path), "%s/reindex_bpchar_ops.sql",
ctx->cwd);
os_info.cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -495,7 +490,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
int i_nspname,
i_relname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* find bpchar_pattern_ops indexes */
@ -503,7 +498,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
* Do only non-hash, non-gin indexes; we already invalidated them
* above; no need to reindex twice
*/
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname "
"FROM pg_catalog.pg_index i, "
" pg_catalog.pg_class c, "
@ -529,16 +524,16 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
if (!check_mode)
{
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "\\connect %s\n",
quote_identifier(ctx, active_db->db_name));
quote_identifier(active_db->db_name));
db_used = true;
}
fprintf(script, "REINDEX INDEX %s.%s;\n",
quote_identifier(ctx, PQgetvalue(res, rowno, i_nspname)),
quote_identifier(ctx, PQgetvalue(res, rowno, i_relname)));
quote_identifier(PQgetvalue(res, rowno, i_nspname)),
quote_identifier(PQgetvalue(res, rowno, i_relname)));
}
}
@ -546,7 +541,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
if (!check_mode && found)
/* mark bpchar_pattern_ops indexes as invalid */
PQclear(executeQueryOrDie(ctx, conn,
PQclear(executeQueryOrDie(conn,
"UPDATE pg_catalog.pg_index i "
"SET indisvalid = false "
"FROM pg_catalog.pg_class c, "
@ -569,9 +564,9 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
{
if (!check_mode)
fclose(script);
report_status(ctx, PG_WARNING, "warning");
report_status(PG_WARNING, "warning");
if (check_mode)
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains indexes using\n"
"| \"bpchar_pattern_ops\". These indexes have\n"
"| different internal formats between your old and\n"
@ -579,7 +574,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
"| REINDEX command. After migration, you will be\n"
"| given REINDEX instructions.\n\n");
else
pg_log(ctx, PG_WARNING, "\n"
pg_log(PG_WARNING, "\n"
"| Your installation contains indexes using\n"
"| \"bpchar_pattern_ops\". These indexes have\n"
"| different internal formats between your old and\n"
@ -592,7 +587,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
output_path);
}
else
check_ok(ctx);
check_ok();
}
@ -607,18 +602,17 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
* server, even in link mode.
*/
char *
old_8_3_create_sequence_script(migratorContext *ctx, Cluster whichCluster)
old_8_3_create_sequence_script(Cluster whichCluster)
{
ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
&ctx->old : &ctx->new;
ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
int dbnum;
FILE *script = NULL;
bool found = false;
char *output_path = pg_malloc(ctx, MAXPGPATH);
char *output_path = pg_malloc(MAXPGPATH);
snprintf(output_path, MAXPGPATH, "%s/adjust_sequences.sql", ctx->cwd);
snprintf(output_path, MAXPGPATH, "%s/adjust_sequences.sql", os_info.cwd);
prep_status(ctx, "Creating script to adjust sequences");
prep_status("Creating script to adjust sequences");
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@ -629,10 +623,10 @@ old_8_3_create_sequence_script(migratorContext *ctx, Cluster whichCluster)
int i_nspname,
i_relname;
DbInfo *active_db = &active_cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(ctx, active_db->db_name, whichCluster);
PGconn *conn = connectToServer(active_db->db_name, whichCluster);
/* Find any sequences */
res = executeQueryOrDie(ctx, conn,
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n "
@ -655,27 +649,27 @@ old_8_3_create_sequence_script(migratorContext *ctx, Cluster whichCluster)
found = true;
if (script == NULL && (script = fopen(output_path, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
pg_log(PG_FATAL, "Could not create necessary file: %s\n", output_path);
if (!db_used)
{
fprintf(script, "\\connect %s\n\n",
quote_identifier(ctx, active_db->db_name));
quote_identifier(active_db->db_name));
db_used = true;
}
/* Find the desired sequence */
seq_res = executeQueryOrDie(ctx, conn,
seq_res = executeQueryOrDie(conn,
"SELECT s.last_value, s.is_called "
"FROM %s.%s s",
quote_identifier(ctx, nspname),
quote_identifier(ctx, relname));
quote_identifier(nspname),
quote_identifier(relname));
assert(PQntuples(seq_res) == 1);
i_last_value = PQfnumber(seq_res, "last_value");
i_is_called = PQfnumber(seq_res, "is_called");
fprintf(script, "SELECT setval('%s.%s', %s, '%s');\n",
quote_identifier(ctx, nspname), quote_identifier(ctx, relname),
quote_identifier(nspname), quote_identifier(relname),
PQgetvalue(seq_res, 0, i_last_value), PQgetvalue(seq_res, 0, i_is_called));
PQclear(seq_res);
}
@ -689,7 +683,7 @@ old_8_3_create_sequence_script(migratorContext *ctx, Cluster whichCluster)
if (found)
fclose(script);
check_ok(ctx);
check_ok();
if (found)
return output_path;