Fix pg_upgrade for oid removal.

pg_upgrade previously copied pg_largeobject_metadata over from the old
cluster. That no longer works, because the table had oids before
578b229718 removed them, so the copied contents don't match the new
catalog definition. I missed that.

As most pieces of metadata for large objects were already dumped as
DDL (except for comments, which were overwritten by pg_upgrade due to
the copy of pg_largeobject_metadata), it seems reasonable to just also
dump grants for large objects.  If we ever consider this a relevant
performance problem, we'd need to fix the rest of the already emitted
DDL too.
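
For illustration, in binary-upgrade mode the restore script now carries
per-large-object DDL along these lines (a sketch; the OID, role names,
and comment text are made up):

    SELECT pg_catalog.lo_create('16403');
    ALTER LARGE OBJECT 16403 OWNER TO alice;
    COMMENT ON LARGE OBJECT 16403 IS 'scanned invoice';
    -- new with this commit: grants are dumped as well
    GRANT SELECT ON LARGE OBJECT 16403 TO bob;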

There's still an open discussion about whether we'll want to force a
specific ordering for the dumped objects, as currently
pg_largeobject_metadata potentially has a different ordering before
and after pg_upgrade, which can make automated testing a bit harder.
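
Until that's settled, automated comparisons can impose an ordering
themselves, e.g. (a sketch; run against both clusters and diff the
output):

    SELECT oid, lomowner, lomacl
    FROM pg_catalog.pg_largeobject_metadata
    ORDER BY oid;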

Reported-By: Andrew Dunstan
Author: Andres Freund
Discussion: https://postgr.es/m/91a8a980-41bc-412b-fba2-2ba71a141c2b@2ndQuadrant.com
Andres Freund 2018-11-26 14:20:36 -08:00
parent 70d7e507ef
commit 12a53c732c
4 changed files with 15 additions and 62 deletions

src/bin/pg_dump/pg_dump.c

@@ -2866,8 +2866,8 @@ dumpDatabase(Archive *fout)
 					 NULL, NULL);
 
 	/*
-	 * pg_largeobject and pg_largeobject_metadata come from the old system
-	 * intact, so set their relfrozenxids and relminmxids.
+	 * pg_largeobject comes from the old system intact, so set its
+	 * relfrozenxids and relminmxids.
 	 */
 	if (dopt->binary_upgrade)
 	{
@@ -2912,47 +2912,6 @@ dumpDatabase(Archive *fout)
 	PQclear(lo_res);
 
-	/*
-	 * pg_largeobject_metadata
-	 */
-	if (fout->remoteVersion >= 90000)
-	{
-		resetPQExpBuffer(loFrozenQry);
-		resetPQExpBuffer(loOutQry);
-
-		if (fout->remoteVersion >= 90300)
-			appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
-							  "FROM pg_catalog.pg_class\n"
-							  "WHERE oid = %u;\n",
-							  LargeObjectMetadataRelationId);
-		else
-			appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
-							  "FROM pg_catalog.pg_class\n"
-							  "WHERE oid = %u;\n",
-							  LargeObjectMetadataRelationId);
-
-		lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
-
-		i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
-		i_relminmxid = PQfnumber(lo_res, "relminmxid");
-
-		appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
-		appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
-						  "SET relfrozenxid = '%u', relminmxid = '%u'\n"
-						  "WHERE oid = %u;\n",
-						  atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
-						  atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
-						  LargeObjectMetadataRelationId);
-
-		ArchiveEntry(fout, nilCatalogId, createDumpId(),
-					 "pg_largeobject_metadata", NULL, NULL, "",
-					 "pg_largeobject_metadata", SECTION_PRE_DATA,
-					 loOutQry->data, "", NULL,
-					 NULL, 0,
-					 NULL, NULL);
-
-		PQclear(lo_res);
-	}
-
 	destroyPQExpBuffer(loFrozenQry);
 	destroyPQExpBuffer(loOutQry);
 }
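
The twin of the removed block, handling pg_largeobject itself, stays in
place just above. The archive entry it writes boils down to SQL of this
shape, where 2613 is pg_largeobject's fixed pg_class oid and the xid
values are placeholders:

    -- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid
    UPDATE pg_catalog.pg_class
    SET relfrozenxid = '726', relminmxid = '1'
    WHERE oid = 2613;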
@@ -3266,18 +3225,14 @@ getBlobs(Archive *fout)
 			binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
 
 		/*
-		 * In binary-upgrade mode for blobs, we do *not* dump out the data or
-		 * the ACLs, should any exist. The data and ACL (if any) will be
-		 * copied by pg_upgrade, which simply copies the pg_largeobject and
-		 * pg_largeobject_metadata tables.
-		 *
-		 * We *do* dump out the definition of the blob because we need that to
-		 * make the restoration of the comments, and anything else, work since
-		 * pg_upgrade copies the files behind pg_largeobject and
-		 * pg_largeobject_metadata after the dump is restored.
+		 * In binary-upgrade mode for blobs, we do *not* dump out the blob
+		 * data, as it will be copied by pg_upgrade, which simply copies the
+		 * pg_largeobject table. We *do* however dump out anything but the
+		 * data, as pg_upgrade copies just pg_largeobject, but not
+		 * pg_largeobject_metadata, after the dump is restored.
 		 */
 		if (dopt->binary_upgrade)
-			binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
+			binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
 	}
 
 	/*

src/bin/pg_dump/t/002_pg_dump.pl

@@ -2791,9 +2791,9 @@ my %tests = (
 		data_only => 1,
 		section_pre_data => 1,
 		test_schema_plus_blobs => 1,
+		binary_upgrade => 1,
 	},
 	unlike => {
-		binary_upgrade => 1,
 		no_blobs => 1,
 		no_privs => 1,
 		schema_only => 1,
src/bin/pg_upgrade/info.c

@@ -441,8 +441,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
 	 *
 	 * pg_largeobject contains user data that does not appear in pg_dump
	 * output, so we have to copy that system table. It's easiest to do that
-	 * by treating it as a user table. Likewise for pg_largeobject_metadata,
-	 * if it exists.
+	 * by treating it as a user table.
 	 */
 	snprintf(query + strlen(query), sizeof(query) - strlen(query),
 			 "WITH regular_heap (reloid, indtable, toastheap) AS ( "
@@ -458,10 +457,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
 			 "			'binary_upgrade', 'pg_toast') AND "
 			 "	  c.oid >= %u::pg_catalog.oid) OR "
 			 "  (n.nspname = 'pg_catalog' AND "
-			 "	relname IN ('pg_largeobject'%s) ))), ",
-			 FirstNormalObjectId,
-			 (GET_MAJOR_VERSION(old_cluster.major_version) >= 900) ?
-			 ", 'pg_largeobject_metadata'" : "");
+			 "	relname IN ('pg_largeobject') ))), ",
+			 FirstNormalObjectId);
 
 	/*
 	 * Add a CTE that collects OIDs of toast tables belonging to the tables
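
With the %s branch gone, the catalog side of the filter is the same for
every supported old version. A standalone paraphrase of the predicate
being built (FirstNormalObjectId is 16384; the full query applies
further joins and relkind filters not visible in this hunk):

    SELECT c.oid, n.nspname, c.relname
    FROM pg_catalog.pg_class c
    JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
    WHERE (n.nspname NOT IN ('pg_catalog', 'information_schema',
                             'binary_upgrade', 'pg_toast')
           AND c.oid >= 16384::pg_catalog.oid)
       OR (n.nspname = 'pg_catalog' AND c.relname = 'pg_largeobject');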

src/bin/pg_upgrade/pg_upgrade.c

@@ -28,8 +28,9 @@
  *	We control all assignments of pg_enum.oid because these oids are stored
  *	in user tables as enum values.
  *
- *	We control all assignments of pg_authid.oid because these oids are stored
- *	in pg_largeobject_metadata.
+ *	We control all assignments of pg_authid.oid for historical reasons (the
+ *	oids used to be stored in pg_largeobject_metadata, which is now recreated
+ *	via SQL commands); that might change at some point in the future.
  */
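
For context on why pg_authid.oid assignments are still controlled: with
--binary-upgrade, pg_dumpall pins each role's oid before creating it,
schematically (role name and oid value are illustrative):

    SELECT pg_catalog.binary_upgrade_set_next_pg_authid_oid('16384'::pg_catalog.oid);
    CREATE ROLE alice;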