In pg_dump, dump SEQUENCE SET items in the data not pre-data section.

Represent a sequence's current value as a separate TableDataInfo dumpable
object, so that it can be dumped within the data section of the archive
rather than in pre-data.  This fixes an undesirable inconsistency between
the meanings of "--data-only" and "--section=data", and also fixes dumping
of sequences that are marked as extension configuration tables, as per a
report from Marko Kreen back in July.  The main cost is that we do one more
SQL query per sequence, but that's probably not very meaningful in most
databases.

Back-patch to 9.1, since it has the extension configuration issue even
though not the --section switch.
Tom Lane 2012-10-26 12:12:42 -04:00
parent bf01e34b55
commit 5a39114fe7
2 changed files with 176 additions and 163 deletions
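
For context, the per-sequence round trip that this patch moves into the data
section can be sketched as a standalone libpq program (an illustration only,
not part of the patch; the sequence name "my_seq" and the environment-based
connection parameters are placeholders):

#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
	/* Connection parameters come from the usual PG* environment variables */
	PGconn	   *conn = PQconnectdb("");
	PGresult   *res;
	const char *last;
	int			called;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	/* The one extra query per sequence that the commit message mentions */
	res = PQexec(conn, "SELECT last_value, is_called FROM my_seq");
	if (PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) != 1)
	{
		fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
		return 1;
	}

	last = PQgetvalue(res, 0, 0);
	called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);

	/* This is the command a "SEQUENCE SET" archive entry replays at restore */
	printf("SELECT pg_catalog.setval('my_seq', %s, %s);\n",
		   last, called ? "true" : "false");

	PQclear(res);
	PQfinish(conn);
	return 0;
}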


@@ -739,11 +739,11 @@ PostgreSQL documentation
         sections.  The default is to dump all sections.
        </para>
        <para>
-        The data section contains actual table data as well as large-object
-        definitions.
-        Post-data items consist of definitions of indexes, triggers, rules
+        The data section contains actual table data, large-object
+        contents, and sequence values.
+        Post-data items include definitions of indexes, triggers, rules,
         and constraints other than validated check constraints.
-        Pre-data items consist of all other data definition items.
+        Pre-data items include all other data definition items.
        </para>
       </listitem>
      </varlistentry>


@@ -192,6 +192,7 @@ static void dumpTable(Archive *fout, TableInfo *tbinfo);
 static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
 static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
 static void dumpSequence(Archive *fout, TableInfo *tbinfo);
+static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
 static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
 static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
 static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
@@ -1640,9 +1641,6 @@ makeTableDataInfo(TableInfo *tbinfo, bool oids)
 	/* Skip VIEWs (no data to dump) */
 	if (tbinfo->relkind == RELKIND_VIEW)
 		return;
-	/* Skip SEQUENCEs (handled elsewhere) */
-	if (tbinfo->relkind == RELKIND_SEQUENCE)
-		return;
 	/* Skip FOREIGN TABLEs (no data to dump) */
 	if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
 		return;
@@ -7318,7 +7316,10 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj)
 			dumpCast(fout, (CastInfo *) dobj);
 			break;
 		case DO_TABLE_DATA:
-			dumpTableData(fout, (TableDataInfo *) dobj);
+			if (((TableDataInfo *) dobj)->tdtable->relkind == RELKIND_SEQUENCE)
+				dumpSequenceData(fout, (TableDataInfo *) dobj);
+			else
+				dumpTableData(fout, (TableDataInfo *) dobj);
 			break;
 		case DO_DUMMY_TYPE:
 			/* table rowtypes and array types are never dumped separately */
@@ -12226,13 +12227,13 @@ collectSecLabels(Archive *fout, SecLabelItem **items)
 static void
 dumpTable(Archive *fout, TableInfo *tbinfo)
 {
-	if (tbinfo->dobj.dump)
+	if (tbinfo->dobj.dump && !dataOnly)
 	{
 		char	   *namecopy;
 
 		if (tbinfo->relkind == RELKIND_SEQUENCE)
 			dumpSequence(fout, tbinfo);
-		else if (!dataOnly)
+		else
 			dumpTableSchema(fout, tbinfo);
 
 		/* Handle the ACL here */
@@ -13347,20 +13348,22 @@ findLastBuiltinOid_V70(Archive *fout)
 	return last_oid;
 }
 
+/*
+ * dumpSequence
+ *	  write the declaration (not data) of one user-defined sequence
+ */
 static void
 dumpSequence(Archive *fout, TableInfo *tbinfo)
 {
 	PGresult   *res;
 	char	   *startv,
-			   *last,
 			   *incby,
 			   *maxv = NULL,
 			   *minv = NULL,
 			   *cache;
 	char		bufm[100],
 				bufx[100];
-	bool		cycled,
-				called;
+	bool		cycled;
 	PQExpBuffer query = createPQExpBuffer();
 	PQExpBuffer delqry = createPQExpBuffer();
 	PQExpBuffer labelq = createPQExpBuffer();
@@ -13375,7 +13378,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 	{
 		appendPQExpBuffer(query,
 						  "SELECT sequence_name, "
-						  "start_value, last_value, increment_by, "
+						  "start_value, increment_by, "
 						  "CASE WHEN increment_by > 0 AND max_value = %s THEN NULL "
 						  "     WHEN increment_by < 0 AND max_value = -1 THEN NULL "
 						  "     ELSE max_value "
@@ -13384,7 +13387,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 						  "     WHEN increment_by < 0 AND min_value = %s THEN NULL "
 						  "     ELSE min_value "
 						  "END AS min_value, "
-						  "cache_value, is_cycled, is_called from %s",
+						  "cache_value, is_cycled FROM %s",
 						  bufx, bufm,
 						  fmtId(tbinfo->dobj.name));
 	}
@@ -13392,7 +13395,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 	{
 		appendPQExpBuffer(query,
 						  "SELECT sequence_name, "
-						  "0 AS start_value, last_value, increment_by, "
+						  "0 AS start_value, increment_by, "
 						  "CASE WHEN increment_by > 0 AND max_value = %s THEN NULL "
 						  "     WHEN increment_by < 0 AND max_value = -1 THEN NULL "
 						  "     ELSE max_value "
@@ -13401,7 +13404,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 						  "     WHEN increment_by < 0 AND min_value = %s THEN NULL "
 						  "     ELSE min_value "
 						  "END AS min_value, "
-						  "cache_value, is_cycled, is_called from %s",
+						  "cache_value, is_cycled FROM %s",
 						  bufx, bufm,
 						  fmtId(tbinfo->dobj.name));
 	}
@@ -13428,165 +13431,120 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 #endif
 
 	startv = PQgetvalue(res, 0, 1);
-	last = PQgetvalue(res, 0, 2);
-	incby = PQgetvalue(res, 0, 3);
+	incby = PQgetvalue(res, 0, 2);
+	if (!PQgetisnull(res, 0, 3))
+		maxv = PQgetvalue(res, 0, 3);
 	if (!PQgetisnull(res, 0, 4))
-		maxv = PQgetvalue(res, 0, 4);
-	if (!PQgetisnull(res, 0, 5))
-		minv = PQgetvalue(res, 0, 5);
-	cache = PQgetvalue(res, 0, 6);
-	cycled = (strcmp(PQgetvalue(res, 0, 7), "t") == 0);
-	called = (strcmp(PQgetvalue(res, 0, 8), "t") == 0);
+		minv = PQgetvalue(res, 0, 4);
+	cache = PQgetvalue(res, 0, 5);
+	cycled = (strcmp(PQgetvalue(res, 0, 6), "t") == 0);
 
 	/*
-	 * The logic we use for restoring sequences is as follows:
-	 *
-	 * Add a CREATE SEQUENCE statement as part of a "schema" dump (use
-	 * last_val for start if called is false, else use min_val for start_val).
-	 * Also, if the sequence is owned by a column, add an ALTER SEQUENCE OWNED
-	 * BY command for it.
-	 *
-	 * Add a 'SETVAL(seq, last_val, iscalled)' as part of a "data" dump.
+	 * DROP must be fully qualified in case same name appears in pg_catalog
 	 */
-	if (!dataOnly)
-	{
-		/*
-		 * DROP must be fully qualified in case same name appears in
-		 * pg_catalog
-		 */
-		appendPQExpBuffer(delqry, "DROP SEQUENCE %s.",
-						  fmtId(tbinfo->dobj.namespace->dobj.name));
-		appendPQExpBuffer(delqry, "%s;\n",
-						  fmtId(tbinfo->dobj.name));
+	appendPQExpBuffer(delqry, "DROP SEQUENCE %s.",
+					  fmtId(tbinfo->dobj.namespace->dobj.name));
+	appendPQExpBuffer(delqry, "%s;\n",
+					  fmtId(tbinfo->dobj.name));
 
-		resetPQExpBuffer(query);
+	resetPQExpBuffer(query);
 
-		if (binary_upgrade)
-		{
-			binary_upgrade_set_pg_class_oids(fout, query,
-											 tbinfo->dobj.catId.oid, false);
-			binary_upgrade_set_type_oids_by_rel_oid(fout, query,
-													tbinfo->dobj.catId.oid);
-		}
+	if (binary_upgrade)
+	{
+		binary_upgrade_set_pg_class_oids(fout, query,
+										 tbinfo->dobj.catId.oid, false);
+		binary_upgrade_set_type_oids_by_rel_oid(fout, query,
+												tbinfo->dobj.catId.oid);
+	}
 
-		appendPQExpBuffer(query,
-						  "CREATE SEQUENCE %s\n",
-						  fmtId(tbinfo->dobj.name));
+	appendPQExpBuffer(query,
+					  "CREATE SEQUENCE %s\n",
+					  fmtId(tbinfo->dobj.name));
 
-		if (fout->remoteVersion >= 80400)
-			appendPQExpBuffer(query, "    START WITH %s\n", startv);
-		else
-		{
-			/*
-			 * Versions before 8.4 did not remember the true start value. If
-			 * is_called is false then the sequence has never been incremented
-			 * so we can use last_val.  Otherwise punt and let it default.
-			 */
-			if (!called)
-				appendPQExpBuffer(query, "    START WITH %s\n", last);
-		}
+	if (fout->remoteVersion >= 80400)
+		appendPQExpBuffer(query, "    START WITH %s\n", startv);
 
-		appendPQExpBuffer(query, "    INCREMENT BY %s\n", incby);
+	appendPQExpBuffer(query, "    INCREMENT BY %s\n", incby);
 
-		if (minv)
-			appendPQExpBuffer(query, "    MINVALUE %s\n", minv);
-		else
-			appendPQExpBuffer(query, "    NO MINVALUE\n");
+	if (minv)
+		appendPQExpBuffer(query, "    MINVALUE %s\n", minv);
+	else
+		appendPQExpBuffer(query, "    NO MINVALUE\n");
 
-		if (maxv)
-			appendPQExpBuffer(query, "    MAXVALUE %s\n", maxv);
-		else
-			appendPQExpBuffer(query, "    NO MAXVALUE\n");
+	if (maxv)
+		appendPQExpBuffer(query, "    MAXVALUE %s\n", maxv);
+	else
+		appendPQExpBuffer(query, "    NO MAXVALUE\n");
 
-		appendPQExpBuffer(query,
-						  "    CACHE %s%s",
-						  cache, (cycled ? "\n    CYCLE" : ""));
+	appendPQExpBuffer(query,
+					  "    CACHE %s%s",
+					  cache, (cycled ? "\n    CYCLE" : ""));
 
-		appendPQExpBuffer(query, ";\n");
+	appendPQExpBuffer(query, ";\n");
 
-		appendPQExpBuffer(labelq, "SEQUENCE %s", fmtId(tbinfo->dobj.name));
+	appendPQExpBuffer(labelq, "SEQUENCE %s", fmtId(tbinfo->dobj.name));
 
-		/* binary_upgrade:	no need to clear TOAST table oid */
+	/* binary_upgrade:	no need to clear TOAST table oid */
 
-		if (binary_upgrade)
-			binary_upgrade_extension_member(query, &tbinfo->dobj,
-											labelq->data);
+	if (binary_upgrade)
+		binary_upgrade_extension_member(query, &tbinfo->dobj,
+										labelq->data);
 
-		ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
-					 tbinfo->dobj.name,
-					 tbinfo->dobj.namespace->dobj.name,
-					 NULL,
-					 tbinfo->rolname,
-					 false, "SEQUENCE", SECTION_PRE_DATA,
-					 query->data, delqry->data, NULL,
-					 NULL, 0,
-					 NULL, NULL);
+	ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
+				 tbinfo->dobj.name,
+				 tbinfo->dobj.namespace->dobj.name,
+				 NULL,
+				 tbinfo->rolname,
+				 false, "SEQUENCE", SECTION_PRE_DATA,
+				 query->data, delqry->data, NULL,
+				 NULL, 0,
+				 NULL, NULL);
 
-		/*
-		 * If the sequence is owned by a table column, emit the ALTER for it
-		 * as a separate TOC entry immediately following the sequence's own
-		 * entry.  It's OK to do this rather than using full sorting logic,
-		 * because the dependency that tells us it's owned will have forced
-		 * the table to be created first.  We can't just include the ALTER in
-		 * the TOC entry because it will fail if we haven't reassigned the
-		 * sequence owner to match the table's owner.
-		 *
-		 * We need not schema-qualify the table reference because both
-		 * sequence and table must be in the same schema.
-		 */
-		if (OidIsValid(tbinfo->owning_tab))
-		{
-			TableInfo  *owning_tab = findTableByOid(tbinfo->owning_tab);
+	/*
+	 * If the sequence is owned by a table column, emit the ALTER for it as a
+	 * separate TOC entry immediately following the sequence's own entry.
+	 * It's OK to do this rather than using full sorting logic, because the
+	 * dependency that tells us it's owned will have forced the table to be
+	 * created first.  We can't just include the ALTER in the TOC entry
+	 * because it will fail if we haven't reassigned the sequence owner to
+	 * match the table's owner.
+	 *
+	 * We need not schema-qualify the table reference because both sequence
+	 * and table must be in the same schema.
+	 */
+	if (OidIsValid(tbinfo->owning_tab))
+	{
+		TableInfo  *owning_tab = findTableByOid(tbinfo->owning_tab);
 
-			if (owning_tab && owning_tab->dobj.dump)
-			{
-				resetPQExpBuffer(query);
-				appendPQExpBuffer(query, "ALTER SEQUENCE %s",
-								  fmtId(tbinfo->dobj.name));
-				appendPQExpBuffer(query, " OWNED BY %s",
-								  fmtId(owning_tab->dobj.name));
-				appendPQExpBuffer(query, ".%s;\n",
-						fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
-				ArchiveEntry(fout, nilCatalogId, createDumpId(),
-							 tbinfo->dobj.name,
-							 tbinfo->dobj.namespace->dobj.name,
-							 NULL,
-							 tbinfo->rolname,
-							 false, "SEQUENCE OWNED BY", SECTION_PRE_DATA,
-							 query->data, "", NULL,
-							 &(tbinfo->dobj.dumpId), 1,
-							 NULL, NULL);
-			}
-		}
+		if (owning_tab && owning_tab->dobj.dump)
+		{
+			resetPQExpBuffer(query);
+			appendPQExpBuffer(query, "ALTER SEQUENCE %s",
+							  fmtId(tbinfo->dobj.name));
+			appendPQExpBuffer(query, " OWNED BY %s",
+							  fmtId(owning_tab->dobj.name));
+			appendPQExpBuffer(query, ".%s;\n",
+					fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
+			ArchiveEntry(fout, nilCatalogId, createDumpId(),
+						 tbinfo->dobj.name,
+						 tbinfo->dobj.namespace->dobj.name,
+						 NULL,
+						 tbinfo->rolname,
+						 false, "SEQUENCE OWNED BY", SECTION_PRE_DATA,
+						 query->data, "", NULL,
+						 &(tbinfo->dobj.dumpId), 1,
+						 NULL, NULL);
+		}
+	}
 
-		/* Dump Sequence Comments and Security Labels */
-		dumpComment(fout, labelq->data,
-					tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
-					tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
-		dumpSecLabel(fout, labelq->data,
-					 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
-					 tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
-	}
-
-	if (!schemaOnly)
-	{
-		resetPQExpBuffer(query);
-		appendPQExpBuffer(query, "SELECT pg_catalog.setval(");
-		appendStringLiteralAH(query, fmtId(tbinfo->dobj.name), fout);
-		appendPQExpBuffer(query, ", %s, %s);\n",
-						  last, (called ? "true" : "false"));
-
-		ArchiveEntry(fout, nilCatalogId, createDumpId(),
-					 tbinfo->dobj.name,
-					 tbinfo->dobj.namespace->dobj.name,
-					 NULL,
-					 tbinfo->rolname,
-					 false, "SEQUENCE SET", SECTION_PRE_DATA,
-					 query->data, "", NULL,
-					 &(tbinfo->dobj.dumpId), 1,
-					 NULL, NULL);
-	}
+	/* Dump Sequence Comments and Security Labels */
+	dumpComment(fout, labelq->data,
+				tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
+				tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
+	dumpSecLabel(fout, labelq->data,
+				 tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
+				 tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
 
 	PQclear(res);
@@ -13595,6 +13553,61 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 	destroyPQExpBuffer(labelq);
 }
 
+/*
+ * dumpSequenceData
+ *	  write the data of one user-defined sequence
+ */
+static void
+dumpSequenceData(Archive *fout, TableDataInfo *tdinfo)
+{
+	TableInfo  *tbinfo = tdinfo->tdtable;
+	PGresult   *res;
+	char	   *last;
+	bool		called;
+	PQExpBuffer query = createPQExpBuffer();
+
+	/* Make sure we are in proper schema */
+	selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
+
+	appendPQExpBuffer(query,
+					  "SELECT last_value, is_called FROM %s",
+					  fmtId(tbinfo->dobj.name));
+
+	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+
+	if (PQntuples(res) != 1)
+	{
+		write_msg(NULL, ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)\n",
+								 "query to get data of sequence \"%s\" returned %d rows (expected 1)\n",
+								 PQntuples(res)),
+				  tbinfo->dobj.name, PQntuples(res));
+		exit_nicely(1);
+	}
+
+	last = PQgetvalue(res, 0, 0);
+	called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
+
+	resetPQExpBuffer(query);
+	appendPQExpBuffer(query, "SELECT pg_catalog.setval(");
+	appendStringLiteralAH(query, fmtId(tbinfo->dobj.name), fout);
+	appendPQExpBuffer(query, ", %s, %s);\n",
+					  last, (called ? "true" : "false"));
+
+	ArchiveEntry(fout, nilCatalogId, createDumpId(),
+				 tbinfo->dobj.name,
+				 tbinfo->dobj.namespace->dobj.name,
+				 NULL,
+				 tbinfo->rolname,
+				 false, "SEQUENCE SET", SECTION_DATA,
+				 query->data, "", NULL,
+				 &(tbinfo->dobj.dumpId), 1,
+				 NULL, NULL);
+
+	PQclear(res);
+
+	destroyPQExpBuffer(query);
+}
+
 static void
 dumpTrigger(Archive *fout, TriggerInfo *tginfo)
 {