postgresql/src/bin/pg_dump/pg_backup_archiver.c

/*-------------------------------------------------------------------------
*
* pg_backup_archiver.c
*
* Private implementation of the archiver routines.
*
* See the headers to pg_restore for more details.
*
* Copyright (c) 2000, Philip Warner
* Rights are granted to use this software in any way so long
* as this notice is not removed.
*
* The author is not responsible for loss or damages that may
* result from its use.
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.94 2004/08/20 20:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "pg_backup.h"
#include "pg_dump.h"
#include "pg_backup_archiver.h"
#include "pg_backup_db.h"
#include "dumputils.h"
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#include "pqexpbuffer.h"
#include "libpq/libpq-fs.h"
typedef enum _teReqs_
{
REQ_SCHEMA = 1,
REQ_DATA = 2,
REQ_ALL = REQ_SCHEMA + REQ_DATA
} teReqs;
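/*
* _tocEntryRequired() returns a bitmask of the flags above, indicating
* whether the schema part, the data part, both, or neither of a given
* TOC entry is wanted under the current restore options.
*/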
const char *progname;
static char *modulename = gettext_noop("archiver");
static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
const int compression, ArchiveMode mode);
static char *_getObjectFromDropStmt(const char *dropStmt, const char *type);
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass);
static void fixPriorBlobRefs(ArchiveHandle *AH, TocEntry *blobte,
RestoreOptions *ropt);
static void _doSetFixedOutputState(ArchiveHandle *AH);
static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
static void _doSetWithOids(ArchiveHandle *AH, const bool withOids);
static void _reconnectToDB(ArchiveHandle *AH, const char *dbname, const char *user);
static void _becomeUser(ArchiveHandle *AH, const char *user);
static void _becomeOwner(ArchiveHandle *AH, TocEntry *te);
static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName);
static teReqs _tocEntryRequired(TocEntry *te, RestoreOptions *ropt, bool acl_pass);
static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
static TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id);
static void _moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te);
static int _discoverArchiveFormat(ArchiveHandle *AH);
static void _write_msg(const char *modulename, const char *fmt, va_list ap);
static void _die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt, va_list ap);
static int _canRestoreBlobs(ArchiveHandle *AH);
static int _restoringToDB(ArchiveHandle *AH);
/*
* Wrapper functions.
*
* The objective is to make writing new formats and dumpers as simple
* as possible, if necessary at the expense of extra function calls etc.
*
*/
/* Create a new archive */
/* Public */
Archive *
CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
const int compression)
{
ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression, archModeWrite);
return (Archive *) AH;
}
/* Open an existing archive */
/* Public */
Archive *
OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
{
ArchiveHandle *AH = _allocAH(FileSpec, fmt, 0, archModeRead);
return (Archive *) AH;
}
/* Public */
void
CloseArchive(Archive *AHX)
{
int res = 0;
ArchiveHandle *AH = (ArchiveHandle *) AHX;
(*AH->ClosePtr) (AH);
/* Close the output */
if (AH->gzOut)
res = GZCLOSE(AH->OF);
else if (AH->OF != stdout)
res = fclose(AH->OF);
if (res != 0)
die_horribly(AH, modulename, "could not close output archive file\n");
}
/* Public */
void
RestoreArchive(Archive *AHX, RestoreOptions *ropt)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
TocEntry *te = AH->toc->next;
teReqs reqs;
OutputContext sav;
int impliedDataOnly;
bool defnDumped;
AH->ropt = ropt;
AH->stage = STAGE_INITIALIZING;
/*
* Check for nonsensical option combinations.
*
* NB: create+dropSchema is useless because if you're creating the DB,
* there's no need to drop individual items in it. Moreover, if we
* tried to do that then we'd issue the drops in the database
* initially connected to, not the one we will create, which is very
* bad...
*/
if (ropt->create && ropt->dropSchema)
die_horribly(AH, modulename, "-C and -c are incompatible options\n");
/*
* If we're using a DB connection, then connect it.
*/
if (ropt->useDB)
{
ahlog(AH, 1, "connecting to database for restore\n");
if (AH->version < K_VERS_1_3)
die_horribly(AH, modulename, "direct database connections are not supported in pre-1.3 archives\n");
/* XXX Should get this from the archive */
AHX->minRemoteVersion = 070100;
AHX->maxRemoteVersion = 999999;
ConnectDatabase(AHX, ropt->dbname,
ropt->pghost, ropt->pgport, ropt->username,
ropt->requirePassword, ropt->ignoreVersion);
/* If we're talking to the DB directly, don't send comments since they obscure SQL when displaying errors */
AH->noTocComments = 1;
}
/*
* Work out if we have an implied data-only restore. This can happen
* if the dump was data only or if the user has used a toc list to
* exclude all of the schema data. All we do is look for schema
* entries - if none are found then we set the dataOnly flag.
*
* We could scan for wanted TABLE entries, but that is not the same as
* dataOnly. At this stage, it seems unnecessary (6-Mar-2001).
*/
if (!ropt->dataOnly)
{
te = AH->toc->next;
impliedDataOnly = 1;
while (te != AH->toc)
{
reqs = _tocEntryRequired(te, ropt, false);
if ((reqs & REQ_SCHEMA) != 0)
{ /* It's schema, and it's wanted */
impliedDataOnly = 0;
break;
}
te = te->next;
}
if (impliedDataOnly)
{
ropt->dataOnly = impliedDataOnly;
ahlog(AH, 1, "implied data-only restore\n");
}
}
/*
* Setup the output file if necessary.
*/
if (ropt->filename || ropt->compression)
sav = SetOutput(AH, ropt->filename, ropt->compression);
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
/*
* Establish important parameter values right away.
*/
_doSetFixedOutputState(AH);
AH->stage = STAGE_PROCESSING;
/*
* Drop the items at the start, in reverse order
*/
if (ropt->dropSchema)
{
te = AH->toc->prev;
AH->currentTE = te;
while (te != AH->toc)
{
reqs = _tocEntryRequired(te, ropt, false);
if (((reqs & REQ_SCHEMA) != 0) && te->dropStmt)
{
/* We want the schema */
ahlog(AH, 1, "dropping %s %s\n", te->desc, te->tag);
/* Select owner and schema as necessary */
_becomeOwner(AH, te);
_selectOutputSchema(AH, te->namespace);
/* Drop it */
ahprintf(AH, "%s", te->dropStmt);
}
te = te->prev;
}
}
/*
* Now process each TOC entry
*/
te = AH->toc->next;
while (te != AH->toc)
{
AH->currentTE = te;
/* Work out what, if anything, we want from this entry */
reqs = _tocEntryRequired(te, ropt, false);
/* Dump any relevant dump warnings to stderr */
if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0)
{
if (!ropt->dataOnly && te->defn != NULL && strlen(te->defn) != 0)
write_msg(modulename, "warning from original dump file: %s\n", te->defn);
else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0)
write_msg(modulename, "warning from original dump file: %s\n", te->copyStmt);
}
defnDumped = false;
if ((reqs & REQ_SCHEMA) != 0) /* We want the schema */
{
ahlog(AH, 1, "creating %s %s\n", te->desc, te->tag);
_printTocEntry(AH, te, ropt, false, false);
defnDumped = true;
/* If we created a DB, connect to it... */
if (strcmp(te->desc, "DATABASE") == 0)
{
ahlog(AH, 1, "connecting to new database \"%s\" as user \"%s\"\n", te->tag, te->owner);
_reconnectToDB(AH, te->tag, te->owner);
}
}
/*
* If we have a data component, then process it
*/
if ((reqs & REQ_DATA) != 0)
{
/*
* hadDumper will be set if there is genuine data component
* for this node. Otherwise, we need to check the defn field
* for statements that need to be executed in data-only
* restores.
*/
if (te->hadDumper)
{
/*
* If we can output the data, then restore it.
*/
if (AH->PrintTocDataPtr != NULL && (reqs & REQ_DATA) != 0)
{
#ifndef HAVE_LIBZ
if (AH->compression != 0)
die_horribly(AH, modulename, "cannot restore from compressed archive (not configured for compression support)\n");
#endif
_printTocEntry(AH, te, ropt, true, false);
/*
* Maybe we can't do BLOBS, so check if this node is
* for BLOBS
*/
if ((strcmp(te->desc, "BLOBS") == 0) &&
!_canRestoreBlobs(AH))
{
ahprintf(AH, "--\n-- SKIPPED \n--\n\n");
/*
* This is a bit nasty - we assume, for the
* moment, that if a custom output is used, then
* we don't want warnings.
*/
if (!AH->CustomOutPtr)
write_msg(modulename, "WARNING: skipping large-object restoration\n");
}
else
{
_disableTriggersIfNecessary(AH, te, ropt);
/* Select owner and schema as necessary */
_becomeOwner(AH, te);
_selectOutputSchema(AH, te->namespace);
ahlog(AH, 1, "restoring data for table \"%s\"\n", te->tag);
/*
* If we have a copy statement, use it. As of
* V1.3, these are separate to allow easy import
* from within a database connection. Pre-1.3
* archives cannot use DB connections and are
* sent to output only.
*
* For V1.3+, the table data MUST have a copy
* statement so that we can go into appropriate
* mode with libpq.
*/
if (te->copyStmt && strlen(te->copyStmt) > 0)
ahprintf(AH, "%s", te->copyStmt);
(*AH->PrintTocDataPtr) (AH, te, ropt);
/*
* If we just restored blobs, fix references in
* previously-loaded tables; otherwise, if we
* previously restored blobs, fix references in
* this table. Note that in standard cases the BLOBS
* entry comes after all TABLE DATA entries, but
* we should cope with other orders in case the
* user demands reordering.
*/
if (strcmp(te->desc, "BLOBS") == 0)
fixPriorBlobRefs(AH, te, ropt);
else if (AH->createdBlobXref &&
strcmp(te->desc, "TABLE DATA") == 0)
{
ahlog(AH, 1, "fixing up large-object cross-reference for \"%s\"\n", te->tag);
FixupBlobRefs(AH, te);
}
_enableTriggersIfNecessary(AH, te, ropt);
}
}
}
else if (!defnDumped)
{
/* If we haven't already dumped the defn part, do so now */
ahlog(AH, 1, "executing %s %s\n", te->desc, te->tag);
_printTocEntry(AH, te, ropt, false, false);
}
}
te = te->next;
} /* end loop over TOC entries */
/*
* Scan TOC again to output ownership commands and ACLs
*/
te = AH->toc->next;
while (te != AH->toc)
{
AH->currentTE = te;
/* Work out what, if anything, we want from this entry */
reqs = _tocEntryRequired(te, ropt, true);
if ((reqs & REQ_SCHEMA) != 0) /* We want the schema */
{
ahlog(AH, 1, "setting owner and acl for %s %s\n",
te->desc, te->tag);
_printTocEntry(AH, te, ropt, false, true);
}
te = te->next;
}
/*
* Clean up & we're done.
*/
AH->stage = STAGE_FINALIZING;
if (ropt->filename || ropt->compression)
ResetOutput(AH, sav);
if (ropt->useDB)
{
PQfinish(AH->connection);
AH->connection = NULL;
if (AH->blobConnection)
{
PQfinish(AH->blobConnection);
AH->blobConnection = NULL;
}
}
ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
}
/*
* After restoring BLOBS, fix all blob references in previously-restored
* tables. (Normally, the BLOBS entry should appear after all TABLE DATA
* entries, so this will in fact handle all blob references.)
*/
static void
fixPriorBlobRefs(ArchiveHandle *AH, TocEntry *blobte, RestoreOptions *ropt)
{
TocEntry *te;
teReqs reqs;
if (AH->createdBlobXref)
{
/* NULL parameter means disable ALL user triggers */
_disableTriggersIfNecessary(AH, NULL, ropt);
for (te = AH->toc->next; te != blobte; te = te->next)
{
if (strcmp(te->desc, "TABLE DATA") == 0)
{
reqs = _tocEntryRequired(te, ropt, false);
if ((reqs & REQ_DATA) != 0) /* We loaded the data */
{
ahlog(AH, 1, "fixing up large-object cross-reference for \"%s\"\n", te->tag);
FixupBlobRefs(AH, te);
}
}
}
/* NULL parameter means enable ALL user triggers */
_enableTriggersIfNecessary(AH, NULL, ropt);
}
}
/*
* Allocate a new RestoreOptions block.
* This is mainly so we can initialize it, but also for future expansion,
*/
RestoreOptions *
NewRestoreOptions(void)
{
RestoreOptions *opts;
opts = (RestoreOptions *) calloc(1, sizeof(RestoreOptions));
opts->format = archUnknown;
opts->suppressDumpWarnings = false;
opts->exit_on_error = false;
return opts;
}
/*
* Returns true if we're restoring directly to the database (and
* aren't just making a psql script that can do the restoration).
*/
static int
_restoringToDB(ArchiveHandle *AH)
{
return (AH->ropt->useDB && AH->connection);
}
static int
_canRestoreBlobs(ArchiveHandle *AH)
{
return _restoringToDB(AH);
}
static void
_disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
{
/* This hack is only needed in a data-only restore */
if (!ropt->dataOnly || !ropt->disable_triggers)
return;
/* Don't do it for the BLOBS TocEntry, either */
if (te && strcmp(te->desc, "BLOBS") == 0)
return;
/*
* Become superuser if possible, since they are the only ones who can
* update pg_class. If -S was not given, assume the initial user identity
* is a superuser.
*/
_becomeUser(AH, ropt->superuser);
ahlog(AH, 1, "disabling triggers\n");
/*
* Disable them. This is a hack. Needs to be done via an appropriate
* 'SET' command when one is available.
*/
ahprintf(AH, "-- Disable triggers\n");
/*
* Just update the AFFECTED table, if known. Otherwise update all
* non-system tables.
*/
if (te && te->tag && strlen(te->tag) > 0)
ahprintf(AH, "UPDATE pg_catalog.pg_class SET reltriggers = 0 "
"WHERE oid = '%s'::pg_catalog.regclass;\n\n",
fmtId(te->tag));
else
ahprintf(AH, "UPDATE pg_catalog.pg_class SET reltriggers = 0 FROM pg_catalog.pg_namespace "
"WHERE relnamespace = pg_namespace.oid AND nspname !~ '^pg_';\n\n");
}
static void
_enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
{
/* This hack is only needed in a data-only restore */
if (!ropt->dataOnly || !ropt->disable_triggers)
return;
/* Don't do it for the BLOBS TocEntry, either */
if (te && strcmp(te->desc, "BLOBS") == 0)
return;
/*
* Become superuser if possible, since they are the only ones who can
* update pg_class. If -S was not given, assume the initial user identity
* is a superuser.
*/
_becomeUser(AH, ropt->superuser);
ahlog(AH, 1, "enabling triggers\n");
/*
* Enable them. This is a hack. Needs to be done via an appropriate
* 'SET' command when one is available.
*/
ahprintf(AH, "-- Enable triggers\n");
/*
* Just update the AFFECTED table, if known. Otherwise update all
* non-system tables.
*/
if (te && te->tag && strlen(te->tag) > 0)
ahprintf(AH, "UPDATE pg_catalog.pg_class SET reltriggers = "
"(SELECT pg_catalog.count(*) FROM pg_catalog.pg_trigger where pg_class.oid = tgrelid) "
"WHERE oid = '%s'::pg_catalog.regclass;\n\n",
fmtId(te->tag));
else
ahprintf(AH, "UPDATE pg_catalog.pg_class SET reltriggers = "
"(SELECT pg_catalog.count(*) FROM pg_catalog.pg_trigger where pg_class.oid = tgrelid) "
"FROM pg_catalog.pg_namespace "
"WHERE relnamespace = pg_namespace.oid AND nspname !~ '^pg_';\n\n");
}
/*
* This is a routine that is part of the dumper interface, hence the 'Archive*' parameter.
*/
/* Public */
size_t
WriteData(Archive *AHX, const void *data, size_t dLen)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (!AH->currToc)
die_horribly(AH, modulename, "internal error -- WriteData cannot be called outside the context of a DataDumper routine\n");
return (*AH->WriteDataPtr) (AH, data, dLen);
}
/*
* Create a new TOC entry. The TOC was designed as a TOC, but is now the
* repository for all metadata. But the name has stuck.
*/
/* Public */
void
ArchiveEntry(Archive *AHX,
CatalogId catalogId, DumpId dumpId,
const char *tag,
const char *namespace, const char *owner, bool withOids,
const char *desc, const char *defn,
const char *dropStmt, const char *copyStmt,
const DumpId *deps, int nDeps,
DataDumperPtr dumpFn, void *dumpArg)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
TocEntry *newToc;
newToc = (TocEntry *) calloc(1, sizeof(TocEntry));
if (!newToc)
die_horribly(AH, modulename, "out of memory\n");
AH->tocCount++;
if (dumpId > AH->maxDumpId)
AH->maxDumpId = dumpId;
newToc->prev = AH->toc->prev;
newToc->next = AH->toc;
AH->toc->prev->next = newToc;
AH->toc->prev = newToc;
newToc->catalogId = catalogId;
newToc->dumpId = dumpId;
newToc->tag = strdup(tag);
newToc->namespace = namespace ? strdup(namespace) : NULL;
newToc->owner = strdup(owner);
newToc->withOids = withOids;
newToc->desc = strdup(desc);
newToc->defn = strdup(defn);
newToc->dropStmt = strdup(dropStmt);
newToc->copyStmt = copyStmt ? strdup(copyStmt) : NULL;
if (nDeps > 0)
{
newToc->dependencies = (DumpId *) malloc(nDeps * sizeof(DumpId));
memcpy(newToc->dependencies, deps, nDeps * sizeof(DumpId));
newToc->nDeps = nDeps;
}
else
{
newToc->dependencies = NULL;
newToc->nDeps = 0;
}
newToc->dataDumper = dumpFn;
newToc->dataDumperArg = dumpArg;
newToc->hadDumper = dumpFn ? true : false;
newToc->formatData = NULL;
if (AH->ArchiveEntryPtr != NULL)
(*AH->ArchiveEntryPtr) (AH, newToc);
}
/* Public */
void
PrintTOCSummary(Archive *AHX, RestoreOptions *ropt)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
TocEntry *te = AH->toc->next;
OutputContext sav;
char *fmtName;
if (ropt->filename)
sav = SetOutput(AH, ropt->filename, 0 /* no compression */);
ahprintf(AH, ";\n; Archive created at %s", ctime(&AH->createDate));
ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n",
AH->archdbname, AH->tocCount, AH->compression);
switch (AH->format)
{
case archFiles:
fmtName = "FILES";
break;
case archCustom:
fmtName = "CUSTOM";
break;
case archTar:
fmtName = "TAR";
break;
default:
fmtName = "UNKNOWN";
}
ahprintf(AH, "; Dump Version: %d.%d-%d\n", AH->vmaj, AH->vmin, AH->vrev);
ahprintf(AH, "; Format: %s\n", fmtName);
ahprintf(AH, "; Integer: %d bytes\n", (int) AH->intSize);
ahprintf(AH, "; Offset: %d bytes\n", (int) AH->offSize);
ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
while (te != AH->toc)
{
if (_tocEntryRequired(te, ropt, false) != 0)
ahprintf(AH, "%d; %u %u %s %s %s\n", te->dumpId,
te->catalogId.tableoid, te->catalogId.oid,
te->desc, te->tag, te->owner);
te = te->next;
}
if (ropt->filename)
ResetOutput(AH, sav);
}
/***********
* BLOB Archival
***********/
/* Called by a dumper to signal start of a BLOB */
int
StartBlob(Archive *AHX, Oid oid)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (!AH->StartBlobPtr)
die_horribly(AH, modulename, "large-object output not supported in chosen format\n");
(*AH->StartBlobPtr) (AH, AH->currToc, oid);
return 1;
}
/* Called by a dumper to signal end of a BLOB */
int
EndBlob(Archive *AHX, Oid oid)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
if (AH->EndBlobPtr)
(*AH->EndBlobPtr) (AH, AH->currToc, oid);
return 1;
}
/**********
* BLOB Restoration
**********/
/*
* Called by a format handler before any blobs are restored
*/
void
StartRestoreBlobs(ArchiveHandle *AH)
{
AH->blobCount = 0;
}
/*
* Called by a format handler after all blobs are restored
*/
void
EndRestoreBlobs(ArchiveHandle *AH)
{
if (AH->txActive)
{
ahlog(AH, 2, "committing large-object transactions\n");
CommitTransaction(AH);
}
if (AH->blobTxActive)
CommitTransactionXref(AH);
if (AH->createdBlobXref)
CreateBlobXrefIndex(AH);
ahlog(AH, 1, "restored %d large objects\n", AH->blobCount);
}
/*
* Called by a format handler to initiate restoration of a blob
*/
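/*
* The first blob restored creates the OID cross-reference table (this
* requires a direct database connection). Each archived blob is loaded
* into a freshly created large object, and the old-to-new OID mapping is
* recorded via InsertBlobXref() so that FixupBlobRefs() can later fix up
* references in restored table data (see fixPriorBlobRefs()).
*/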
void
StartRestoreBlob(ArchiveHandle *AH, Oid oid)
{
Oid loOid;
AH->blobCount++;
if (!AH->createdBlobXref)
{
if (!AH->connection)
die_horribly(AH, modulename, "cannot restore large objects without a database connection\n");
CreateBlobXrefTable(AH);
AH->createdBlobXref = 1;
}
/* Initialize the LO Buffer */
AH->lo_buf_used = 0;
/*
* Start long-running TXs if necessary
*/
if (!AH->txActive)
{
ahlog(AH, 2, "starting large-object transactions\n");
StartTransaction(AH);
}
if (!AH->blobTxActive)
StartTransactionXref(AH);
loOid = lo_creat(AH->connection, INV_READ | INV_WRITE);
if (loOid == 0)
die_horribly(AH, modulename, "could not create large object\n");
ahlog(AH, 2, "restoring large object with OID %u as %u\n", oid, loOid);
InsertBlobXref(AH, oid, loOid);
AH->loFd = lo_open(AH->connection, loOid, INV_WRITE);
if (AH->loFd == -1)
die_horribly(AH, modulename, "could not open large object\n");
AH->writingBlob = 1;
}
void
EndRestoreBlob(ArchiveHandle *AH, Oid oid)
{
if (AH->lo_buf_used > 0)
{
/* Write remaining bytes from the LO buffer */
size_t res;
res = lo_write(AH->connection, AH->loFd, (void *) AH->lo_buf, AH->lo_buf_used);
ahlog(AH, 5, "wrote remaining %lu bytes of large-object data (result = %lu)\n",
(unsigned long) AH->lo_buf_used, (unsigned long) res);
if (res != AH->lo_buf_used)
die_horribly(AH, modulename, "could not write to large object (result: %lu, expected: %lu)\n",
(unsigned long) res, (unsigned long) AH->lo_buf_used);
AH->lo_buf_used = 0;
}
lo_close(AH->connection, AH->loFd);
AH->writingBlob = 0;
/*
* Commit every BLOB_BATCH_SIZE blobs, i.e. whenever blobCount is an
* exact multiple of BLOB_BATCH_SIZE...
*/
if (((AH->blobCount / BLOB_BATCH_SIZE) * BLOB_BATCH_SIZE) == AH->blobCount)
{
ahlog(AH, 2, "committing large-object transactions\n");
CommitTransaction(AH);
CommitTransactionXref(AH);
}
}
/***********
* Sorting and Reordering
***********/
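/*
* SortTocFromFile reorders the TOC to match a user-supplied listing file
* (ropt->tocFile). Each non-blank, non-comment line must begin with a dump
* ID; anything from a ';' onward is ignored. Entries named in the file are
* marked as wanted and moved into the listed order.
*/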
void
SortTocFromFile(Archive *AHX, RestoreOptions *ropt)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
FILE *fh;
char buf[1024];
char *cmnt;
char *endptr;
DumpId id;
TocEntry *te;
TocEntry *tePrev;
/* Allocate space for the 'wanted' array, and init it */
ropt->idWanted = (bool *) malloc(sizeof(bool) * AH->maxDumpId);
memset(ropt->idWanted, 0, sizeof(bool) * AH->maxDumpId);
ropt->limitToList = true;
/* Set prev entry as head of list */
tePrev = AH->toc;
/* Setup the file */
fh = fopen(ropt->tocFile, PG_BINARY_R);
if (!fh)
die_horribly(AH, modulename, "could not open TOC file\n");
while (fgets(buf, 1024, fh) != NULL)
2000-07-04 16:25:28 +02:00
{
/* Find a comment */
cmnt = strchr(buf, ';');
if (cmnt == buf)
continue;
/* End string at comment */
if (cmnt != NULL)
cmnt[0] = '\0';
/* Skip if all spaces */
if (strspn(buf, " \t") == strlen(buf))
continue;
/* Get an ID */
id = strtol(buf, &endptr, 10);
if (endptr == buf || id <= 0 || id > AH->maxDumpId)
{
write_msg(modulename, "WARNING: line ignored: %s\n", buf);
continue;
}
/* Find TOC entry */
te = getTocEntryByDumpId(AH, id);
if (!te)
die_horribly(AH, modulename, "could not find entry for ID %d\n",
id);
ropt->idWanted[id - 1] = true;
_moveAfter(AH, tePrev, te);
tePrev = te;
}
if (fclose(fh) != 0)
die_horribly(AH, modulename, "could not close TOC file: %s\n",
strerror(errno));
}
/**********************
* Convenience functions that look like standard IO functions
* for writing data when in dump mode.
**********************/
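/*
* Both of these route through WriteData(), so the bytes are handed to the
* active format handler (via its WriteDataPtr) rather than being written
* directly to a stdio stream.
*/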
/* Public */
int
archputs(const char *s, Archive *AH)
{
return WriteData(AH, s, strlen(s));
}
/* Public */
int
archprintf(Archive *AH, const char *fmt,...)
{
char *p = NULL;
va_list ap;
int bSize = strlen(fmt) + 256;
int cnt = -1;
/*
* This is paranoid: deal with the possibility that vsnprintf is
* willing to ignore the trailing null, or returns > 0 even if the
* string does not fit. It may be the case that it returns cnt == bSize.
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
if (p != NULL)
free(p);
bSize *= 2;
p = (char *) malloc(bSize);
if (p == NULL)
exit_horribly(AH, modulename, "out of memory\n");
va_start(ap, fmt);
cnt = vsnprintf(p, bSize, fmt, ap);
va_end(ap);
}
WriteData(AH, p, cnt);
free(p);
return cnt;
}
/*******************************
* Stuff below here should be 'private' to the archiver routines
*******************************/
OutputContext
SetOutput(ArchiveHandle *AH, char *filename, int compression)
{
OutputContext sav;
int fn;
/* Replace the AH output file handle */
sav.OF = AH->OF;
sav.gzOut = AH->gzOut;
if (filename)
fn = -1;
else if (AH->FH)
fn = fileno(AH->FH);
else if (AH->fSpec)
{
fn = -1;
filename = AH->fSpec;
}
else
fn = fileno(stdout);
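/*
* At this point fn >= 0 means "dup this existing file descriptor", while
* fn == -1 means "open the file named by filename".
*/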
/* If compression explicitly requested, use gzopen */
#ifdef HAVE_LIBZ
if (compression != 0)
{
char fmode[10];
/* Don't use PG_BINARY_x since this is zlib */
sprintf(fmode, "wb%d", compression);
if (fn >= 0)
AH->OF = gzdopen(dup(fn), fmode);
else
AH->OF = gzopen(filename, fmode);
AH->gzOut = 1;
}
else
#endif
{ /* Use fopen */
if (fn >= 0)
AH->OF = fdopen(dup(fn), PG_BINARY_W);
else
AH->OF = fopen(filename, PG_BINARY_W);
AH->gzOut = 0;
}
if (!AH->OF)
die_horribly(AH, modulename, "could not open output file: %s\n", strerror(errno));
return sav;
}
void
ResetOutput(ArchiveHandle *AH, OutputContext sav)
{
int res;
if (AH->gzOut)
res = GZCLOSE(AH->OF);
else
res = fclose(AH->OF);
if (res != 0)
die_horribly(AH, modulename, "could not close output file: %s\n",
strerror(errno));
AH->gzOut = sav.gzOut;
AH->OF = sav.OF;
}
/*
* Print formatted text to the output file (usually stdout).
*/
int
ahprintf(ArchiveHandle *AH, const char *fmt,...)
{
char *p = NULL;
va_list ap;
int bSize = strlen(fmt) + 256; /* Should be enough */
int cnt = -1;
/*
* This is paranoid: deal with the possibility that vsnprintf is
* willing to ignore the trailing null, or returns > 0 even if the
* string does not fit. It may be the case that it returns cnt == bSize.
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
if (p != NULL)
free(p);
bSize *= 2;
p = (char *) malloc(bSize);
if (p == NULL)
die_horribly(AH, modulename, "out of memory\n");
va_start(ap, fmt);
cnt = vsnprintf(p, bSize, fmt, ap);
va_end(ap);
}
ahwrite(p, 1, cnt, AH);
free(p);
return cnt;
}
void
ahlog(ArchiveHandle *AH, int level, const char *fmt,...)
{
va_list ap;
if (AH->debugLevel < level && (!AH->public.verbose || level > 1))
return;
va_start(ap, fmt);
_write_msg(NULL, fmt, ap);
va_end(ap);
}
/*
* Single place for logic which says 'We are restoring to a direct DB connection'.
*/
2001-03-22 05:01:46 +01:00
int
RestoringToDB(ArchiveHandle *AH)
{
return (AH->ropt && AH->ropt->useDB && AH->connection);
}
/*
* Write buffer to the output file (usually stdout). This is used for
* outputting 'restore' scripts etc. It is even possible for an archive
* format to create a custom output routine to 'fake' a restore if it
* wants to generate a script (see TAR output).
*/
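/*
* Output is dispatched in this order: large-object buffering (while a blob
* is being restored), then gzip output, then a format-specific custom
* output routine, then a direct database connection, and finally a plain
* fwrite() to the output file.
*/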
int
ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
{
size_t res;
if (AH->writingBlob)
{
if (AH->lo_buf_used + size * nmemb > AH->lo_buf_size)
{
/* Split LO buffer */
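/*
* The incoming data would overflow lo_buf: fill the buffer to capacity,
* flush it to the large object with lo_write(), then keep the leftover
* ("slack") bytes buffered for later.
*/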
size_t remaining = AH->lo_buf_size - AH->lo_buf_used;
size_t slack = nmemb * size - remaining;
memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_size);
ahlog(AH, 5, "wrote %lu bytes of large object data (result = %lu)\n",
(unsigned long) AH->lo_buf_size, (unsigned long) res);
if (res != AH->lo_buf_size)
die_horribly(AH, modulename,
"could not write to large object (result: %lu, expected: %lu)\n",
(unsigned long) res, (unsigned long) AH->lo_buf_size);
memcpy(AH->lo_buf, (char *) ptr + remaining, slack);
AH->lo_buf_used = slack;
}
else
{
/* LO Buffer is still large enough, buffer it */
memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, size * nmemb);
AH->lo_buf_used += size * nmemb;
}
return size * nmemb;
}
else if (AH->gzOut)
{
res = GZWRITE((void *) ptr, size, nmemb, AH->OF);
if (res != (nmemb * size))
die_horribly(AH, modulename, "could not write to compressed archive\n");
return res;
}
else if (AH->CustomOutPtr)
{
res = AH->CustomOutPtr (AH, ptr, size * nmemb);
if (res != (nmemb * size))
die_horribly(AH, modulename, "could not write to custom output routine\n");
return res;
}
else
{
/*
* If we're doing a restore, and it's direct to DB, and we're
* connected then send it to the DB.
*/
if (RestoringToDB(AH))
return ExecuteSqlCommandBuf(AH, (void *) ptr, size * nmemb); /* Always 1, currently */
else
{
res = fwrite((void *) ptr, size, nmemb, AH->OF);
if (res != nmemb)
die_horribly(AH, modulename, "could not write to output file (%lu != %lu)\n",
(unsigned long) res, (unsigned long) nmemb);
return res;
}
}
}
/* Common exit code */
static void
_write_msg(const char *modulename, const char *fmt, va_list ap)
{
if (modulename)
fprintf(stderr, "%s: [%s] ", progname, gettext(modulename));
else
fprintf(stderr, "%s: ", progname);
vfprintf(stderr, gettext(fmt), ap);
}
void
write_msg(const char *modulename, const char *fmt,...)
{
va_list ap;
va_start(ap, fmt);
_write_msg(modulename, fmt, ap);
va_end(ap);
}
static void
_die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt, va_list ap)
{
_write_msg(modulename, fmt, ap);
if (AH)
{
if (AH->public.verbose)
write_msg(NULL, "*** aborted because of error\n");
if (AH->connection)
PQfinish(AH->connection);
if (AH->blobConnection)
PQfinish(AH->blobConnection);
}
exit(1);
}
/* External use */
void
exit_horribly(Archive *AH, const char *modulename, const char *fmt,...)
{
va_list ap;
va_start(ap, fmt);
_die_horribly((ArchiveHandle *) AH, modulename, fmt, ap);
va_end(ap);
}
/* Archiver use (just different arg declaration) */
void
die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt,...)
{
va_list ap;
va_start(ap, fmt);
_die_horribly(AH, modulename, fmt, ap);
va_end(ap);
}
/* on some error, we may decide to go on... */
void
warn_or_die_horribly(ArchiveHandle *AH,
const char *modulename, const char *fmt, ...)
{
va_list ap;
switch(AH->stage) {
case STAGE_NONE:
/* Do nothing special */
break;
case STAGE_INITIALIZING:
if (AH->stage != AH->lastErrorStage) {
write_msg(modulename, "Error while INITIALIZING:\n");
}
break;
case STAGE_PROCESSING:
if (AH->stage != AH->lastErrorStage) {
write_msg(modulename, "Error while PROCESSING TOC:\n");
}
break;
case STAGE_FINALIZING:
if (AH->stage != AH->lastErrorStage) {
write_msg(modulename, "Error while FINALIZING:\n");
}
break;
}
if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE) {
write_msg(modulename, "Error from TOC Entry %d; %u %u %s %s %s\n", AH->currentTE->dumpId,
AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
}
AH->lastErrorStage = AH->stage;
AH->lastErrorTE = AH->currentTE;
va_start(ap, fmt);
if (AH->public.exit_on_error)
{
_die_horribly(AH, modulename, fmt, ap);
}
else
{
_write_msg(modulename, fmt, ap);
AH->public.n_errors++;
}
va_end(ap);
}
static void
_moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
{
te->prev->next = te->next;
te->next->prev = te->prev;
te->prev = pos;
te->next = pos->next;
pos->next->prev = te;
pos->next = te;
}
#ifdef NOT_USED
static void
_moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
{
te->prev->next = te->next;
te->next->prev = te->prev;
te->prev = pos->prev;
te->next = pos;
pos->prev->next = te;
pos->prev = te;
}
#endif
static TocEntry *
getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
{
TocEntry *te;
te = AH->toc->next;
while (te != AH->toc)
{
if (te->dumpId == id)
return te;
te = te->next;
}
return NULL;
}
int
TocIDRequired(ArchiveHandle *AH, DumpId id, RestoreOptions *ropt)
{
TocEntry *te = getTocEntryByDumpId(AH, id);
if (!te)
return 0;
return _tocEntryRequired(te, ropt, false);
}
size_t
WriteOffset(ArchiveHandle *AH, off_t o, int wasSet)
{
int off;
/* Save the flag */
(*AH->WriteBytePtr) (AH, wasSet);
/* Write out off_t smallest byte first, prevents endian mismatch */
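/*
* For example (illustrative): with sizeof(off_t) == 8, an offset of 0x0102
* is written as the flag byte followed by 02 01 00 00 00 00 00 00.
*/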
for (off = 0; off < sizeof(off_t); off++)
{
(*AH->WriteBytePtr) (AH, o & 0xFF);
o >>= 8;
}
return sizeof(off_t) + 1;
}
int
ReadOffset(ArchiveHandle *AH, off_t *o)
{
int i;
int off;
int offsetFlg;
/* Initialize to zero */
*o = 0;
/* Check for old version */
if (AH->version < K_VERS_1_7)
{
/* Prior versions wrote offsets using WriteInt */
i = ReadInt(AH);
/* -1 means not set */
if (i < 0)
return K_OFFSET_POS_NOT_SET;
else if (i == 0)
return K_OFFSET_NO_DATA;
/* Cast to off_t because it was written as an int. */
*o = (off_t) i;
return K_OFFSET_POS_SET;
}
/*
* Read the flag indicating the state of the data pointer. Check if
* valid and die if not.
*
* This used to be handled by a negative or zero pointer, now we use an
* extra byte specifically for the state.
*/
offsetFlg = (*AH->ReadBytePtr) (AH) & 0xFF;
switch (offsetFlg)
{
case K_OFFSET_POS_NOT_SET:
case K_OFFSET_NO_DATA:
case K_OFFSET_POS_SET:
break;
default:
die_horribly(AH, modulename, "Unexpected data offset flag %d\n", offsetFlg);
}
/*
* Read the bytes
*/
for (off = 0; off < AH->offSize; off++)
{
if (off < sizeof(off_t))
*o |= ((off_t) ((*AH->ReadBytePtr) (AH))) << (off * 8);
else
{
if ((*AH->ReadBytePtr) (AH) != 0)
die_horribly(AH, modulename, "file offset in dump file is too large\n");
}
}
return offsetFlg;
}
size_t
WriteInt(ArchiveHandle *AH, int i)
{
int b;
/*
* This is a bit yucky, but I don't want to make the binary format
* very dependent on representation, and not knowing much about it, I
* write out a sign byte. If you change this, don't forget to change
* the file version #, and modify readInt to read the new format AS
* WELL AS the old formats.
*/
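/*
* Example (illustrative): with intSize == 4, the value -5 is written as the
* five bytes 01 05 00 00 00 -- a sign byte followed by the magnitude,
* least-significant byte first.
*/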
/* SIGN byte */
if (i < 0)
{
(*AH->WriteBytePtr) (AH, 1);
i = -i;
}
else
(*AH->WriteBytePtr) (AH, 0);
for (b = 0; b < AH->intSize; b++)
{
(*AH->WriteBytePtr) (AH, i & 0xFF);
i >>= 8;
}
return AH->intSize + 1;
}
int
ReadInt(ArchiveHandle *AH)
{
int res = 0;
int bv,
b;
int sign = 0; /* Default positive */
int bitShift = 0;
if (AH->version > K_VERS_1_0)
/* Read a sign byte */
sign = (*AH->ReadBytePtr) (AH);
for (b = 0; b < AH->intSize; b++)
{
bv = (*AH->ReadBytePtr) (AH) & 0xFF;
if (bv != 0)
res = res + (bv << bitShift);
bitShift += 8;
}
if (sign)
res = -res;
return res;
}
size_t
WriteStr(ArchiveHandle *AH, const char *c)
{
size_t res;
if (c)
{
res = WriteInt(AH, strlen(c));
res += (*AH->WriteBufPtr) (AH, c, strlen(c));
}
else
res = WriteInt(AH, -1);
return res;
}
char *
ReadStr(ArchiveHandle *AH)
{
char *buf;
int l;
l = ReadInt(AH);
if (l == -1)
buf = NULL;
else
{
buf = (char *) malloc(l + 1);
if (!buf)
die_horribly(AH, modulename, "out of memory\n");
(*AH->ReadBufPtr) (AH, (void *) buf, l);
buf[l] = '\0';
}
return buf;
}
static int
_discoverArchiveFormat(ArchiveHandle *AH)
{
FILE *fh;
char sig[6]; /* More than enough */
size_t cnt;
int wantClose = 0;
#if 0
write_msg(modulename, "attempting to ascertain archive format\n");
#endif
if (AH->lookahead)
free(AH->lookahead);
AH->lookaheadSize = 512;
AH->lookahead = calloc(1, 512);
AH->lookaheadLen = 0;
AH->lookaheadPos = 0;
if (AH->fSpec)
{
wantClose = 1;
fh = fopen(AH->fSpec, PG_BINARY_R);
2001-03-22 05:01:46 +01:00
}
else
fh = stdin;
2000-07-04 16:25:28 +02:00
2001-03-22 05:01:46 +01:00
if (!fh)
die_horribly(AH, modulename, "could not open input file: %s\n", strerror(errno));
2000-07-04 16:25:28 +02:00
2001-03-22 05:01:46 +01:00
cnt = fread(sig, 1, 5, fh);
2000-07-04 16:25:28 +02:00
2001-03-22 05:01:46 +01:00
if (cnt != 5)
{
if (ferror(fh))
die_horribly(AH, modulename, "could not read input file: %s\n", strerror(errno));
else
die_horribly(AH, modulename, "input file is too short (read %lu, expected 5)\n",
(unsigned long) cnt);
}
/* Save it, just in case we need it later */
strncpy(&AH->lookahead[0], sig, 5);
AH->lookaheadLen = 5;

if (strncmp(sig, "PGDMP", 5) == 0)
{
AH->vmaj = fgetc(fh);
AH->vmin = fgetc(fh);
/* Save these too... */
AH->lookahead[AH->lookaheadLen++] = AH->vmaj;
AH->lookahead[AH->lookaheadLen++] = AH->vmin;
/* Check header version; varies from V1.0 */
if (AH->vmaj > 1 || ((AH->vmaj == 1) && (AH->vmin > 0))) /* Version > 1.0 */
{
AH->vrev = fgetc(fh);
AH->lookahead[AH->lookaheadLen++] = AH->vrev;
}
else
AH->vrev = 0;
/* Make a convenient integer <maj><min><rev>00 */
AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;
AH->intSize = fgetc(fh);
AH->lookahead[AH->lookaheadLen++] = AH->intSize;
if (AH->version >= K_VERS_1_7)
{
AH->offSize = fgetc(fh);
AH->lookahead[AH->lookaheadLen++] = AH->offSize;
}
else
AH->offSize = AH->intSize;
AH->format = fgetc(fh);
AH->lookahead[AH->lookaheadLen++] = AH->format;
}
else
{
/*
* *Maybe* we have a tar archive... so read the first 512-byte tar
* header and check it.
*/
cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
AH->lookaheadLen += cnt;
if (AH->lookaheadLen != 512)
die_horribly(AH, modulename, "input file does not appear to be a valid archive (too short?)\n");

if (!isValidTarHeader(AH->lookahead))
die_horribly(AH, modulename, "input file does not appear to be a valid archive\n");

AH->format = archTar;
}

/* If we can't seek, then mark the header as read */
if (fseeko(fh, 0, SEEK_SET) != 0)
{
/*
* NOTE: Formats that use the lookahead buffer can unset this in
* their Init routine.
*/
AH->readHeader = 1;
}
else
AH->lookaheadLen = 0; /* Don't bother since we've reset the file */
#if 0
write_msg(modulename, "read %lu bytes into lookahead buffer\n",
(unsigned long) AH->lookaheadLen);
#endif

/* Close the file */
if (wantClose)
if (fclose(fh) != 0)
die_horribly(AH, modulename, "could not close the input file after reading header: %s\n",
strerror(errno));

return AH->format;
}
/*
* Allocate an archive handle
*/
static ArchiveHandle *
_allocAH(const char *FileSpec, const ArchiveFormat fmt,
const int compression, ArchiveMode mode)
{
ArchiveHandle *AH;

#if 0
write_msg(modulename, "allocating AH for %s, format %d\n", FileSpec, fmt);
#endif
AH = (ArchiveHandle *) calloc(1, sizeof(ArchiveHandle));
if (!AH)
die_horribly(AH, modulename, "out of memory\n");

/* AH->debugLevel = 100; */

AH->vmaj = K_VERS_MAJOR;
AH->vmin = K_VERS_MINOR;
AH->vrev = K_VERS_REV;

AH->createDate = time(NULL);

AH->intSize = sizeof(int);
AH->offSize = sizeof(off_t);

if (FileSpec)
{
AH->fSpec = strdup(FileSpec);

/*
* Not used; maybe later....
*
* AH->workDir = strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
* i--) if (AH->workDir[i-1] == '/')
*/
}
else
AH->fSpec = NULL;

AH->currUser = strdup(""); /* So it's valid, but we can free() it
* later if necessary */
AH->currSchema = strdup(""); /* ditto */
AH->currWithOids = -1; /* force SET */

AH->toc = (TocEntry *) calloc(1, sizeof(TocEntry));
if (!AH->toc)
die_horribly(AH, modulename, "out of memory\n");

AH->toc->next = AH->toc;
AH->toc->prev = AH->toc;
AH->mode = mode;
AH->compression = compression;

AH->pgCopyBuf = createPQExpBuffer();
AH->sqlBuf = createPQExpBuffer();

/* Open stdout with no compression for AH output handle */
AH->gzOut = 0;
AH->OF = stdout;

#if 0
write_msg(modulename, "archive format is %d\n", fmt);
#endif
if (fmt == archUnknown)
AH->format = _discoverArchiveFormat(AH);
else
AH->format = fmt;

switch (AH->format)
{
case archCustom:
InitArchiveFmt_Custom(AH);
break;
case archFiles:
InitArchiveFmt_Files(AH);
break;
case archNull:
InitArchiveFmt_Null(AH);
break;
case archTar:
InitArchiveFmt_Tar(AH);
break;
default:
die_horribly(AH, modulename, "unrecognized file format \"%d\"\n", fmt);
}

/* sql error handling */
AH->public.exit_on_error = true;
AH->public.n_errors = 0;

return AH;
}
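/*
* Write the data (table contents and BLOBs) for every TOC entry that
* has a dataDumper, bracketing each one with the format's Start/End
* callbacks. The dumper itself feeds the rows back through
* AH->WriteData.
*/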
void
WriteDataChunks(ArchiveHandle *AH)
{
TocEntry *te = AH->toc->next;
StartDataPtr startPtr;
EndDataPtr endPtr;

while (te != AH->toc)
{
if (te->dataDumper != NULL)
{
AH->currToc = te;
/* printf("Writing data for %d (%x)\n", te->id, te); */
if (strcmp(te->desc, "BLOBS") == 0)
{
startPtr = AH->StartBlobsPtr;
endPtr = AH->EndBlobsPtr;
}
else
{
startPtr = AH->StartDataPtr;
endPtr = AH->EndDataPtr;
}

if (startPtr != NULL)
(*startPtr) (AH, te);

/*
* printf("Dumper arg for %d is %x\n", te->id,
* te->dataDumperArg);
*/
/*
* The user-provided DataDumper routine needs to call
* AH->WriteData
*/
(*te->dataDumper) ((Archive *) AH, te->dataDumperArg);

if (endPtr != NULL)
(*endPtr) (AH, te);
AH->currToc = NULL;
}
te = te->next;
}
}
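/*
* Write out the TOC: an entry count, then one record per entry. Each
* record is the dump ID, a had-dumper flag, the table OID and object
* OID as strings, then tag, desc, defn, dropStmt, copyStmt, namespace,
* owner and a "true"/"false" with-OIDs flag, followed by the dependency
* list (one dump ID per string, terminated by a NULL string) and any
* format-specific extra TOC data.
*/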
void
WriteToc(ArchiveHandle *AH)
{
TocEntry *te;
char workbuf[32];
int i;

/* printf("%d TOC Entries to save\n", AH->tocCount); */
WriteInt(AH, AH->tocCount);
for (te = AH->toc->next; te != AH->toc; te = te->next)
{
WriteInt(AH, te->dumpId);
WriteInt(AH, te->dataDumper ? 1 : 0);
/* OID is recorded as a string for historical reasons */
sprintf(workbuf, "%u", te->catalogId.tableoid);
WriteStr(AH, workbuf);
sprintf(workbuf, "%u", te->catalogId.oid);
WriteStr(AH, workbuf);
WriteStr(AH, te->tag);
WriteStr(AH, te->desc);
WriteStr(AH, te->defn);
WriteStr(AH, te->dropStmt);
WriteStr(AH, te->copyStmt);
WriteStr(AH, te->namespace);
WriteStr(AH, te->owner);
WriteStr(AH, te->withOids ? "true" : "false");
/* Dump list of dependencies */
for (i = 0; i < te->nDeps; i++)
{
sprintf(workbuf, "%d", te->dependencies[i]);
WriteStr(AH, workbuf);
}
WriteStr(AH, NULL); /* Terminate List */

if (AH->WriteExtraTocPtr)
(*AH->WriteExtraTocPtr) (AH, te);
}
}
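/*
* Read the TOC back in, mirroring WriteToc. Fields that were added in
* later archive versions are read only when AH->version is new enough,
* so older dump files still load.
*/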
void
ReadToc(ArchiveHandle *AH)
{
int i;
char *tmp;
DumpId *deps;
int depIdx;
int depSize;

TocEntry *te = AH->toc->next;

AH->tocCount = ReadInt(AH);
AH->maxDumpId = 0;

for (i = 0; i < AH->tocCount; i++)
{
te = (TocEntry *) calloc(1, sizeof(TocEntry));
te->dumpId = ReadInt(AH);
if (te->dumpId > AH->maxDumpId)
AH->maxDumpId = te->dumpId;
/* Sanity check */
if (te->dumpId <= 0)
die_horribly(AH, modulename,
"entry ID %d out of range -- perhaps a corrupt TOC\n",
te->dumpId);
te->hadDumper = ReadInt(AH);
if (AH->version >= K_VERS_1_8)
{
tmp = ReadStr(AH);
sscanf(tmp, "%u", &te->catalogId.tableoid);
free(tmp);
}
else
te->catalogId.tableoid = InvalidOid;
tmp = ReadStr(AH);
sscanf(tmp, "%u", &te->catalogId.oid);
free(tmp);
te->tag = ReadStr(AH);
te->desc = ReadStr(AH);
te->defn = ReadStr(AH);
te->dropStmt = ReadStr(AH);
if (AH->version >= K_VERS_1_3)
te->copyStmt = ReadStr(AH);
if (AH->version >= K_VERS_1_6)
te->namespace = ReadStr(AH);
te->owner = ReadStr(AH);
if (AH->version >= K_VERS_1_9)
{
if (strcmp(ReadStr(AH), "true") == 0)
te->withOids = true;
else
te->withOids = false;
}
else
te->withOids = true;
/* Read TOC entry dependencies */
if (AH->version >= K_VERS_1_5)
{
depSize = 100;
deps = (DumpId *) malloc(sizeof(DumpId) * depSize);
depIdx = 0;
for (;;)
{
tmp = ReadStr(AH);
if (!tmp)
break; /* end of list */
if (depIdx >= depSize)
{
depSize *= 2;
deps = (DumpId *) realloc(deps, sizeof(DumpId) * depSize);
}
sscanf(tmp, "%d", &deps[depIdx]);
free(tmp);
depIdx++;
}
if (depIdx > 0) /* We have a non-null entry */
{
deps = (DumpId *) realloc(deps, sizeof(DumpId) * depIdx);
te->dependencies = deps;
te->nDeps = depIdx;
}
else
{
free(deps);
te->dependencies = NULL;
te->nDeps = 0;
}
}
else
{
te->dependencies = NULL;
te->nDeps = 0;
}
if (AH->ReadExtraTocPtr)
(*AH->ReadExtraTocPtr) (AH, te);
ahlog(AH, 3, "read TOC entry %d (ID %d) for %s %s\n",
i, te->dumpId, te->desc, te->tag);
te->prev = AH->toc->prev;
AH->toc->prev->next = te;
AH->toc->prev = te;
te->next = AH->toc;
}
}
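/*
* Decide what to do with a TOC entry under the given restore options.
* The result is a bitmask of REQ_SCHEMA and REQ_DATA; 0 means the
* entry is skipped entirely.
*/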
static teReqs
_tocEntryRequired(TocEntry *te, RestoreOptions *ropt, bool acl_pass)
{
teReqs res = REQ_ALL; /* start with both schema and data */
/* ENCODING objects are dumped specially, so always reject here */
if (strcmp(te->desc, "ENCODING") == 0)
return 0;
/* If it's an ACL, maybe ignore it */
if ((!acl_pass || ropt->aclsSkip) && strcmp(te->desc, "ACL") == 0)
return 0;
if (!ropt->create && strcmp(te->desc, "DATABASE") == 0)
return 0;
/* Check if tablename only is wanted */
if (ropt->selTypes)
{
if ((strcmp(te->desc, "TABLE") == 0) || (strcmp(te->desc, "TABLE DATA") == 0))
{
if (!ropt->selTable)
return 0;
if (ropt->tableNames && strcmp(ropt->tableNames, te->tag) != 0)
return 0;
}
else if (strcmp(te->desc, "INDEX") == 0)
{
if (!ropt->selIndex)
return 0;
if (ropt->indexNames && strcmp(ropt->indexNames, te->tag) != 0)
return 0;
}
else if (strcmp(te->desc, "FUNCTION") == 0)
{
if (!ropt->selFunction)
return 0;
if (ropt->functionNames && strcmp(ropt->functionNames, te->tag) != 0)
return 0;
}
else if (strcmp(te->desc, "TRIGGER") == 0)
{
if (!ropt->selTrigger)
return 0;
if (ropt->triggerNames && strcmp(ropt->triggerNames, te->tag) != 0)
return 0;
}
else
return 0;
}
/*
* Check if we had a dataDumper. Indicates if the entry is schema or
* data
*/
if (!te->hadDumper)
{
/*
* Special Case: If 'SEQUENCE SET' then it is considered a data
* entry
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0)
res = res & REQ_DATA;
else
res = res & ~REQ_DATA;
}
/*
* Special case: <Init> type with <Max OID> tag; this is part of a
* DATA restore even though it has SQL.
*/
if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
res = REQ_DATA;
/* Mask it if we only want schema */
if (ropt->schemaOnly)
res = res & REQ_SCHEMA;

/* Mask it if we only want data */
if (ropt->dataOnly)
res = res & REQ_DATA;

/* Mask it if we don't have a schema contribution */
if (!te->defn || strlen(te->defn) == 0)
res = res & ~REQ_SCHEMA;

/* Finally, if we used a list, limit based on that as well */
if (ropt->limitToList && !ropt->idWanted[te->dumpId - 1])
return 0;

return res;
}
/*
* Issue SET commands for parameters that we want to have set the same way
* at all times during execution of a restore script.
*/
static void
_doSetFixedOutputState(ArchiveHandle *AH)
{
TocEntry *te;
/* If we have an encoding setting, emit that */
te = AH->toc->next;
while (te != AH->toc)
{
if (strcmp(te->desc, "ENCODING") == 0)
{
ahprintf(AH, "%s", te->defn);
break;
}
te = te->next;
}
/* Make sure function checking is disabled */
ahprintf(AH, "SET check_function_bodies = false;\n");
/* Avoid annoying notices etc */
ahprintf(AH, "SET client_min_messages = warning;\n");
ahprintf(AH, "\n");
}
/*
* Issue a SET SESSION AUTHORIZATION command. Caller is responsible
* for updating state if appropriate. If user is NULL or an empty string,
* the specification DEFAULT will be used.
*/
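/*
* The emitted command looks like, e.g., "SET SESSION AUTHORIZATION 'joe';"
* (with the name as a properly escaped string literal), or
* "SET SESSION AUTHORIZATION DEFAULT;" when no user name is given.
*/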
static void
_doSetSessionAuth(ArchiveHandle *AH, const char *user)
{
PQExpBuffer cmd = createPQExpBuffer();
appendPQExpBuffer(cmd, "SET SESSION AUTHORIZATION ");

/*
* SQL requires a string literal here. Might as well be correct.
*/
if (user && *user)
appendStringLiteral(cmd, user, false);
else
appendPQExpBuffer(cmd, "DEFAULT");
appendPQExpBuffer(cmd, ";");
if (RestoringToDB(AH))
{
PGresult *res;
res = PQexec(AH->connection, cmd->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
/* NOT warn_or_die_horribly... use -O instead to skip this. */
die_horribly(AH, modulename, "could not set session user to \"%s\": %s",
user, PQerrorMessage(AH->connection));
PQclear(res);
}
else
ahprintf(AH, "%s\n\n", cmd->data);
destroyPQExpBuffer(cmd);
}
/*
* Issue a SET default_with_oids command. Caller is responsible
* for updating state if appropriate.
*/
static void
_doSetWithOids(ArchiveHandle *AH, const bool withOids)
{
PQExpBuffer cmd = createPQExpBuffer();
appendPQExpBuffer(cmd, "SET default_with_oids = %s;", withOids ?
"true" : "false");
if (RestoringToDB(AH))
{
PGresult *res;
res = PQexec(AH->connection, cmd->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_die_horribly(AH, modulename,
"could not set default_with_oids: %s",
PQerrorMessage(AH->connection));
PQclear(res);
}
else
ahprintf(AH, "%s\n\n", cmd->data);
destroyPQExpBuffer(cmd);
}
/*
* Issue the commands to connect to the specified database
* as the specified user.
*
* If we're currently restoring right into a database, this will
* actually establish a connection. Otherwise it puts a \connect into
* the script output.
*/
static void
_reconnectToDB(ArchiveHandle *AH, const char *dbname, const char *user)
{
if (RestoringToDB(AH))
ReconnectToServer(AH, dbname, user);
else
{
PQExpBuffer qry = createPQExpBuffer();
appendPQExpBuffer(qry, "\\connect %s",
dbname ? fmtId(dbname) : "-");
appendPQExpBuffer(qry, " %s\n\n",
fmtId(user));
ahprintf(AH, "%s", qry->data);
destroyPQExpBuffer(qry);
}
/*
* NOTE: currUser keeps track of what the imaginary session user in
* our script is
*/
if (AH->currUser)
free(AH->currUser);
AH->currUser = strdup(user);
/* don't assume we still know the output schema */
if (AH->currSchema)
free(AH->currSchema);
AH->currSchema = strdup("");
AH->currWithOids = -1;
/* re-establish fixed state */
_doSetFixedOutputState(AH);
}
/*
* Become the specified user, and update state to avoid redundant commands
*
* NULL or empty argument is taken to mean restoring the session default
*/
static void
_becomeUser(ArchiveHandle *AH, const char *user)
{
if (!user)
user = ""; /* avoid null pointers */
if (AH->currUser && strcmp(AH->currUser, user) == 0)
return; /* no need to do anything */
_doSetSessionAuth(AH, user);
/*
* NOTE: currUser keeps track of what the imaginary session user in
* our script is
*/
if (AH->currUser)
free(AH->currUser);
AH->currUser = strdup(user);
}
/*
* Become the owner of the given TOC entry object. If
* changes in ownership are not allowed, this doesn't do anything.
*/
static void
_becomeOwner(ArchiveHandle *AH, TocEntry *te)
{
if (AH->ropt && (AH->ropt->noOwner || !AH->ropt->use_setsessauth))
return;
_becomeUser(AH, te->owner);
}
/*
* Set the proper default_with_oids value for the table.
*/
static void
_setWithOids(ArchiveHandle *AH, TocEntry *te)
{
if (AH->currWithOids != te->withOids)
{
_doSetWithOids(AH, te->withOids);
AH->currWithOids = te->withOids;
}
}
/*
* Issue the commands to select the specified schema as the current schema
* in the target database.
*/
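/*
* The command sent (or written to the script) looks like, e.g.,
* "SET search_path = public, pg_catalog;"; pg_catalog is appended
* unless the selected schema is pg_catalog itself.
*/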
static void
_selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
{
PQExpBuffer qry;
if (!schemaName || *schemaName == '\0' ||
strcmp(AH->currSchema, schemaName) == 0)
return; /* no need to do anything */
qry = createPQExpBuffer();
appendPQExpBuffer(qry, "SET search_path = %s",
fmtId(schemaName));
if (strcmp(schemaName, "pg_catalog") != 0)
appendPQExpBuffer(qry, ", pg_catalog");
if (RestoringToDB(AH))
{
PGresult *res;
res = PQexec(AH->connection, qry->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_die_horribly(AH, modulename,
"could not set search_path to \"%s\": %s",
schemaName, PQerrorMessage(AH->connection));
PQclear(res);
}
else
ahprintf(AH, "%s;\n\n", qry->data);
if (AH->currSchema)
free(AH->currSchema);
AH->currSchema = strdup(schemaName);
destroyPQExpBuffer(qry);
}
/*
* Parses the dropStmt part of a TOC entry and returns
* a newly allocated string that is the object identifier.
* The caller must free the result.
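*
* For example, a dropStmt like "DROP VIEW myview;" with type "VIEW"
* comes back as "TABLE myview" ("myview" being just an example name),
* ready to be spliced into an ALTER ... OWNER TO command.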
*/
static char *
_getObjectFromDropStmt(const char *dropStmt, const char *type)
{
/* Chop "DROP" off the front and make a copy */
char *first = strdup(dropStmt + 5);
char *last = first + strlen(first) - 1; /* Points to the last real char in extract */
char *buf = NULL;
/* Loop from the end of the string until last char is no longer '\n' or ';' */
while (last >= first && (*last == '\n' || *last == ';')) {
last--;
}
/* Insert end of string one place after last */
*(last + 1) = '\0';
/* Take off CASCADE if necessary. Only TYPEs seem to have this, but may
* as well check for all */
if ((last - first) >= 8) {
if (strcmp(last - 7, " CASCADE") == 0)
last -= 8;
}
/* Insert end of string one place after last */
*(last + 1) = '\0';
/* Special case VIEWs and SEQUENCEs. They must use ALTER TABLE. */
if (strcmp(type, "VIEW") == 0 && (last - first) >= 5)
{
int len = 6 + strlen(first + 5) + 1;
buf = malloc(len);
snprintf(buf, len, "TABLE %s", first + 5);
free (first);
}
else if (strcmp(type, "SEQUENCE") == 0 && (last - first) >= 9)
{
int len = 6 + strlen(first + 9) + 1;
buf = malloc(len);
snprintf(buf, len, "TABLE %s", first + 9);
free (first);
}
else
{
buf = first;
}
return buf;
}
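/*
* Emit the SQL for one TOC entry: select the right owner, schema and
* default_with_oids, write the descriptive header comment, then the
* definition itself, plus an ALTER ... OWNER command when ownership
* cannot be expressed through SET SESSION AUTHORIZATION.
*/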
static void
_printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass)
{
const char *pfx;
/* ACLs are dumped only during acl pass */
if (acl_pass)
{
if (strcmp(te->desc, "ACL") != 0)
return;
}
else
{
if (strcmp(te->desc, "ACL") == 0)
return;
}
if (AH->noTocComments)
return;
/*
* Avoid dumping the public schema, as it will already be created ...
* unless we are using --clean mode, in which case it's been deleted
* and we'd better recreate it.
*/
if (!ropt->dropSchema &&
strcmp(te->desc, "SCHEMA") == 0 && strcmp(te->tag, "public") == 0)
return;
/* Select owner and schema as necessary */
_becomeOwner(AH, te);
_selectOutputSchema(AH, te->namespace);
/* Set up OID mode too */
if (strcmp(te->desc, "TABLE") == 0)
_setWithOids(AH, te);
/* Emit header comment for item */
if (isData)
pfx = "Data for ";
else
pfx = "";
ahprintf(AH, "--\n");
if (AH->public.verbose)
{
ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n",
te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
if (te->nDeps > 0)
{
int i;
ahprintf(AH, "-- Dependencies:");
for (i = 0; i < te->nDeps; i++)
ahprintf(AH, " %d", te->dependencies[i]);
ahprintf(AH, "\n");
}
}
ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s\n",
pfx, te->tag, te->desc,
te->namespace ? te->namespace : "-",
te->owner);
if (AH->PrintExtraTocPtr != NULL)
(*AH->PrintExtraTocPtr) (AH, te);
ahprintf(AH, "--\n\n");
/*
* Actually print the definition.
*
* Really crude hack for suppressing AUTHORIZATION clause of CREATE SCHEMA
* when --no-owner mode is selected. This is ugly, but I see no other
* good way ...
*/
if (AH->ropt && AH->ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0)
{
ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", te->tag);
}
else
{
if (strlen(te->defn) > 0)
ahprintf(AH, "%s\n\n", te->defn);
}
/*
* If we aren't using SET SESSION AUTH to determine ownership, we must
* instead issue an ALTER OWNER command. Ugly, since we have to
* cons one up based on the dropStmt. We don't need this for schemas
* (since we use CREATE SCHEMA AUTHORIZATION instead), nor for some other
* object types.
*/
if (!ropt->noOwner && !ropt->use_setsessauth &&
strlen(te->owner) > 0 && strlen(te->dropStmt) > 0 &&
(strcmp(te->desc, "AGGREGATE") == 0 ||
strcmp(te->desc, "CONVERSION") == 0 ||
strcmp(te->desc, "DOMAIN") == 0 ||
strcmp(te->desc, "FUNCTION") == 0 ||
strcmp(te->desc, "OPERATOR") == 0 ||
strcmp(te->desc, "OPERATOR CLASS") == 0 ||
strcmp(te->desc, "TABLE") == 0 ||
strcmp(te->desc, "TYPE") == 0 ||
strcmp(te->desc, "VIEW") == 0 ||
strcmp(te->desc, "SEQUENCE") == 0))
{
char *temp = _getObjectFromDropStmt(te->dropStmt, te->desc);
ahprintf(AH, "ALTER %s OWNER TO %s;\n\n", temp, fmtId(te->owner));
free(temp);
}
/*
* If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
* commands, so we can no longer assume we know the current auth setting.
*/
if (strncmp(te->desc, "ACL", 3) == 0)
{
if (AH->currUser)
free(AH->currUser);
AH->currUser = NULL;
}
}
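/*
* The archive header layout: the 5-byte magic "PGDMP", the three
* version bytes (major, minor, revision), the integer and offset sizes
* in bytes, a format byte, then the compression level, creation
* time-stamp fields and database name written with WriteInt/WriteStr.
*/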
void
WriteHead(ArchiveHandle *AH)
{
struct tm crtm;

(*AH->WriteBufPtr) (AH, "PGDMP", 5); /* Magic code */
(*AH->WriteBytePtr) (AH, AH->vmaj);
(*AH->WriteBytePtr) (AH, AH->vmin);
(*AH->WriteBytePtr) (AH, AH->vrev);
(*AH->WriteBytePtr) (AH, AH->intSize);
(*AH->WriteBytePtr) (AH, AH->offSize);
(*AH->WriteBytePtr) (AH, AH->format);

#ifndef HAVE_LIBZ
if (AH->compression != 0)
write_msg(modulename, "WARNING: requested compression not available in this "
"installation -- archive will be uncompressed\n");

AH->compression = 0;
#endif
WriteInt(AH, AH->compression);
crtm = *localtime(&AH->createDate);
WriteInt(AH, crtm.tm_sec);
WriteInt(AH, crtm.tm_min);
WriteInt(AH, crtm.tm_hour);
WriteInt(AH, crtm.tm_mday);
WriteInt(AH, crtm.tm_mon);
WriteInt(AH, crtm.tm_year);
WriteInt(AH, crtm.tm_isdst);
WriteStr(AH, PQdb(AH->connection));
}
void
ReadHead(ArchiveHandle *AH)
{
char tmpMag[7];
int fmt;
struct tm crtm;

/* If we haven't already read the header... */
if (!AH->readHeader)
{
(*AH->ReadBufPtr) (AH, tmpMag, 5);

if (strncmp(tmpMag, "PGDMP", 5) != 0)
die_horribly(AH, modulename, "did not find magic string in file header\n");

AH->vmaj = (*AH->ReadBytePtr) (AH);
AH->vmin = (*AH->ReadBytePtr) (AH);

if (AH->vmaj > 1 || ((AH->vmaj == 1) && (AH->vmin > 0))) /* Version > 1.0 */
AH->vrev = (*AH->ReadBytePtr) (AH);
else
AH->vrev = 0;

AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;

if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
die_horribly(AH, modulename, "unsupported version (%d.%d) in file header\n",
AH->vmaj, AH->vmin);

AH->intSize = (*AH->ReadBytePtr) (AH);
if (AH->intSize > 32)
die_horribly(AH, modulename, "sanity check on integer size (%lu) failed\n",
(unsigned long) AH->intSize);

if (AH->intSize > sizeof(int))
write_msg(modulename, "WARNING: archive was made on a machine with larger integers, some operations may fail\n");

if (AH->version >= K_VERS_1_7)
AH->offSize = (*AH->ReadBytePtr) (AH);
else
AH->offSize = AH->intSize;

fmt = (*AH->ReadBytePtr) (AH);

if (AH->format != fmt)
die_horribly(AH, modulename, "expected format (%d) differs from format found in file (%d)\n",
AH->format, fmt);
}

if (AH->version >= K_VERS_1_2)
{
if (AH->version < K_VERS_1_4)
AH->compression = (*AH->ReadBytePtr) (AH);
else
AH->compression = ReadInt(AH);
}
else
AH->compression = Z_DEFAULT_COMPRESSION;

#ifndef HAVE_LIBZ
if (AH->compression != 0)
write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
#endif
if (AH->version >= K_VERS_1_4)
{
crtm.tm_sec = ReadInt(AH);
crtm.tm_min = ReadInt(AH);
crtm.tm_hour = ReadInt(AH);
crtm.tm_mday = ReadInt(AH);
crtm.tm_mon = ReadInt(AH);
crtm.tm_year = ReadInt(AH);
crtm.tm_isdst = ReadInt(AH);
AH->archdbname = ReadStr(AH);
AH->createDate = mktime(&crtm);

if (AH->createDate == (time_t) -1)
write_msg(modulename, "WARNING: invalid creation date in header\n");
}
}
/*
* checkSeek
* check to see if fseek can be performed.
*/
bool
checkSeek(FILE *fp)
{
if (fseeko(fp, 0, SEEK_CUR) != 0)
return false;
else if (sizeof(off_t) > sizeof(long))
/*
* At this point, off_t is too large for long, so we return based
* on whether an off_t version of fseek is available.
*/
#ifdef HAVE_FSEEKO
return true;
#else
return false;
#endif
else
return true;
}