Provide for parallel restoration from a custom format archive. Each data and
post-data step is run in a separate worker child (a thread on Windows, a child
process elsewhere), up to the number of concurrent workers specified by the new
pg_restore command-line switch --multi-thread | -m.

Andrew Dunstan, with some editing by Tom Lane.
Andrew Dunstan 2009-02-02 20:07:37 +00:00
parent 3a5b773715
commit 775f1b379e
11 changed files with 1509 additions and 276 deletions
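
A minimal, editor-added C sketch of the worker dispatch described in the commit
message: a thread on Windows, a forked child process elsewhere. Every name below
(WorkerArgs, restore_one_item, spawn_worker, worker_main) is a hypothetical
stand-in rather than code from this patch, and the loop that keeps up to the -m
limit of workers busy is omitted for brevity.

#include <stdio.h>

#ifdef WIN32
#include <process.h>
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#endif

typedef struct
{
	int			worker_id;		/* hypothetical per-worker state */
} WorkerArgs;

static void
restore_one_item(WorkerArgs *args)
{
	/* a real worker would open its own connection and restore one TOC item */
	printf("worker %d: restoring an item\n", args->worker_id);
}

#ifdef WIN32
static unsigned __stdcall
worker_main(void *arg)
{
	restore_one_item((WorkerArgs *) arg);
	return 0;
}
#endif

static void
spawn_worker(WorkerArgs *args)
{
#ifdef WIN32
	/* on Windows, the worker is a thread in the same process */
	HANDLE		th = (HANDLE) _beginthreadex(NULL, 0, worker_main, args, 0, NULL);

	WaitForSingleObject(th, INFINITE);
	CloseHandle(th);
#else
	/* elsewhere, the worker is a forked child process */
	pid_t		pid = fork();

	if (pid == 0)
	{
		restore_one_item(args);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
#endif
}

int
main(void)
{
	WorkerArgs	args = {1};

	spawn_worker(&args);
	return 0;
}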


@ -1,4 +1,4 @@
<!-- $PostgreSQL: pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.77 2009/01/05 16:54:36 tgl Exp $ -->
<!-- $PostgreSQL: pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.78 2009/02/02 20:07:36 adunstan Exp $ -->
<refentry id="APP-PGRESTORE">
<refmeta>
@ -241,6 +241,28 @@
</listitem>
</varlistentry>
<varlistentry>
<term><option>-m <replaceable class="parameter">number-of-threads</replaceable></option></term>
<term><option>--multi-thread=<replaceable class="parameter">number-of-threads</replaceable></option></term>
<listitem>
<para>
Run the most time-consuming parts of <application>pg_restore</>
&mdash; those which load data, create indexes, or create
constraints &mdash; using multiple concurrent connections to the
database. This option can dramatically reduce the time to restore a
large database to a server running on a multi-processor machine.
</para>
<para>
This option is ignored when emitting a script rather than connecting
directly to a database server. Multiple threads cannot be used
together with <option>--single-transaction</option>. Also, the input
must be a plain file (not, for example, a pipe), and at present only
the custom archive format is supported.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>-n <replaceable class="parameter">namespace</replaceable></option></term>
<term><option>--schema=<replaceable class="parameter">schema</replaceable></option></term>
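
The documentation paragraph above lists the conditions under which the parallel
path is actually taken. As an editor-added illustration only (cut-down stand-in
types and a hypothetical function, not code from the patch), those restrictions
amount to a gate along these lines:

#include <stdbool.h>

/* cut-down stand-ins for the real pg_restore structures */
typedef enum
{
	archCustom,
	archTar,
	archFiles,
	archNull
} MiniArchiveFormat;

typedef struct
{
	int			number_of_threads;	/* value of --multi-thread / -m */
	bool		single_txn;			/* was --single-transaction given? */
	int			useDB;				/* restoring directly into a database? */
} MiniRestoreOptions;

static bool
can_restore_in_parallel(const MiniRestoreOptions *opts,
						MiniArchiveFormat fmt, bool input_is_seekable)
{
	if (opts->number_of_threads <= 1)
		return false;			/* parallelism not requested */
	if (!opts->useDB)
		return false;			/* emitting a script: the option is ignored */
	if (opts->single_txn)
		return false;			/* rejected outright with an error message */
	if (fmt != archCustom)
		return false;			/* only the custom archive format is supported */
	if (!input_is_seekable)
		return false;			/* a pipe or other non-seekable input won't do */
	return true;
}

With the patch applied, an invocation along the lines of
pg_restore --multi-thread=4 -d targetdb dump.custom (hypothetical database and
file names) would then be eligible for the parallel path.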


@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.48 2009/01/05 16:54:36 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.49 2009/02/02 20:07:36 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -53,6 +53,14 @@ typedef enum _archiveMode
archModeRead
} ArchiveMode;
typedef enum _teSection
{
SECTION_NONE = 1, /* COMMENTs, ACLs, etc; can be anywhere */
SECTION_PRE_DATA, /* stuff to be processed before data */
SECTION_DATA, /* TABLE DATA, BLOBS, BLOB COMMENTS */
SECTION_POST_DATA /* stuff to be processed after data */
} teSection;
/*
* We may want to have some more user-readable data, but in the mean
* time this gives us some abstraction and type checking.
@ -124,6 +132,7 @@ typedef struct _restoreOptions
int suppressDumpWarnings; /* Suppress output of WARNING entries
* to stderr */
bool single_txn;
int number_of_threads;
bool *idWanted; /* array showing which dump IDs to emit */
} RestoreOptions;
@ -152,7 +161,8 @@ extern void ArchiveEntry(Archive *AHX,
const char *tag,
const char *namespace, const char *tablespace,
const char *owner, bool withOids,
const char *desc, const char *defn,
const char *desc, teSection section,
const char *defn,
const char *dropStmt, const char *copyStmt,
const DumpId *deps, int nDeps,
DataDumperPtr dumpFn, void *dumpArg);

File diff suppressed because it is too large.


@ -17,7 +17,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.76 2007/11/07 12:24:24 petere Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.77 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -62,7 +62,7 @@ typedef z_stream *z_streamp;
#endif
#define K_VERS_MAJOR 1
#define K_VERS_MINOR 10
#define K_VERS_MINOR 11
#define K_VERS_REV 0
/* Data block types */
@ -85,8 +85,9 @@ typedef z_stream *z_streamp;
#define K_VERS_1_9 (( (1 * 256 + 9) * 256 + 0) * 256 + 0) /* add default_with_oids
* tracking */
#define K_VERS_1_10 (( (1 * 256 + 10) * 256 + 0) * 256 + 0) /* add tablespace */
#define K_VERS_1_11 (( (1 * 256 + 11) * 256 + 0) * 256 + 0) /* add toc section indicator */
#define K_VERS_MAX (( (1 * 256 + 10) * 256 + 255) * 256 + 0)
#define K_VERS_MAX (( (1 * 256 + 11) * 256 + 255) * 256 + 0)
/* Flags to indicate disposition of offsets stored in files */
@ -99,6 +100,7 @@ struct _tocEntry;
struct _restoreList;
typedef void (*ClosePtr) (struct _archiveHandle * AH);
typedef void (*ReopenPtr) (struct _archiveHandle * AH);
typedef void (*ArchiveEntryPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
typedef void (*StartDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
@ -120,6 +122,9 @@ typedef void (*ReadExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry *
typedef void (*PrintExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
typedef void (*PrintTocDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te, RestoreOptions *ropt);
typedef void (*ClonePtr) (struct _archiveHandle * AH);
typedef void (*DeClonePtr) (struct _archiveHandle * AH);
typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, size_t len);
typedef struct _outputContext
@ -212,6 +217,7 @@ typedef struct _archiveHandle
WriteBufPtr WriteBufPtr; /* Write a buffer of output to the archive */
ReadBufPtr ReadBufPtr; /* Read a buffer of input from the archive */
ClosePtr ClosePtr; /* Close the archive */
ReopenPtr ReopenPtr; /* Reopen the archive */
WriteExtraTocPtr WriteExtraTocPtr; /* Write extra TOC entry data
* associated with the current archive
* format */
@ -225,11 +231,15 @@ typedef struct _archiveHandle
StartBlobPtr StartBlobPtr;
EndBlobPtr EndBlobPtr;
ClonePtr ClonePtr; /* Clone format-specific fields */
DeClonePtr DeClonePtr; /* Clean up cloned fields */
CustomOutPtr CustomOutPtr; /* Alternative script output routine */
/* Stuff for direct DB connection */
char *archdbname; /* DB name *read* from archive */
bool requirePassword;
char *savedPassword; /* password for ropt->username, if known */
PGconn *connection;
int connectToDB; /* Flag to indicate if direct DB connection is
* required */
@ -260,9 +270,9 @@ typedef struct _archiveHandle
* etc */
/* these vars track state to avoid sending redundant SET commands */
char *currUser; /* current username */
char *currSchema; /* current schema */
char *currTablespace; /* current tablespace */
char *currUser; /* current username, or NULL if unknown */
char *currSchema; /* current schema, or NULL */
char *currTablespace; /* current tablespace, or NULL */
bool currWithOids; /* current default_with_oids setting */
void *lo_buf;
@ -282,6 +292,7 @@ typedef struct _tocEntry
struct _tocEntry *next;
CatalogId catalogId;
DumpId dumpId;
teSection section;
bool hadDumper; /* Archiver was passed a dumper routine (used
* in restore) */
char *tag; /* index tag */
@ -300,6 +311,13 @@ typedef struct _tocEntry
DataDumperPtr dataDumper; /* Routine to dump data for object */
void *dataDumperArg; /* Arg for above routine */
void *formatData; /* TOC Entry data specific to file format */
/* working state (needed only for parallel restore) */
bool restored; /* item is in progress or done */
bool created; /* set for DATA member if TABLE was created */
int depCount; /* number of dependencies not yet restored */
DumpId *lockDeps; /* dumpIds of objects this one needs lock on */
int nLockDeps; /* number of such dependencies */
} TocEntry;
/* Used everywhere */
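
The working-state fields added at the end of TocEntry exist so a scheduler can
tell which items are ready to hand to a worker. The following is an editor-added
sketch with stand-in types, not code from the patch; the real bookkeeping is
more involved (lockDeps, table-creation tracking, and so on), but the core idea
is that an entry becomes runnable when its depCount reaches zero.

#include <stdbool.h>
#include <stddef.h>

typedef struct MiniTocEntry
{
	bool		restored;		/* set once the item has been restored */
	int			depCount;		/* number of dependencies not yet restored */
	struct MiniTocEntry **revDeps;	/* entries that depend on this one */
	int			nRevDeps;
} MiniTocEntry;

/* pick an entry whose dependencies are all satisfied, or NULL if none */
static MiniTocEntry *
next_ready_entry(MiniTocEntry *entries, int n)
{
	for (int i = 0; i < n; i++)
	{
		if (!entries[i].restored && entries[i].depCount == 0)
			return &entries[i];
	}
	return NULL;
}

/* when a worker finishes an entry, release everything that waited on it */
static void
mark_restored(MiniTocEntry *te)
{
	te->restored = true;
	for (int i = 0; i < te->nRevDeps; i++)
		te->revDeps[i]->depCount--;
}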


@ -19,7 +19,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.40 2007/10/28 21:55:52 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.41 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,6 +40,7 @@ static int _ReadByte(ArchiveHandle *);
static size_t _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
static size_t _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
static void _CloseArchive(ArchiveHandle *AH);
static void _ReopenArchive(ArchiveHandle *AH);
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
@ -54,6 +55,8 @@ static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
static void _LoadBlobs(ArchiveHandle *AH);
static void _Clone(ArchiveHandle *AH);
static void _DeClone(ArchiveHandle *AH);
/*------------
* Buffers used in zlib compression and extra data stored in archive and
@ -120,6 +123,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
AH->WriteBufPtr = _WriteBuf;
AH->ReadBufPtr = _ReadBuf;
AH->ClosePtr = _CloseArchive;
AH->ReopenPtr = _ReopenArchive;
AH->PrintTocDataPtr = _PrintTocData;
AH->ReadExtraTocPtr = _ReadExtraToc;
AH->WriteExtraTocPtr = _WriteExtraToc;
@ -129,6 +133,8 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
AH->StartBlobPtr = _StartBlob;
AH->EndBlobPtr = _EndBlob;
AH->EndBlobsPtr = _EndBlobs;
AH->ClonePtr = _Clone;
AH->DeClonePtr = _DeClone;
/*
* Set up some special context used in compressing data.
@ -569,7 +575,6 @@ _PrintData(ArchiveHandle *AH)
zp->avail_in = blkLen;
#ifdef HAVE_LIBZ
if (AH->compression != 0)
{
while (zp->avail_in != 0)
@ -585,15 +590,12 @@ _PrintData(ArchiveHandle *AH)
}
}
else
{
#endif
{
in[zp->avail_in] = '\0';
ahwrite(in, 1, zp->avail_in, AH);
zp->avail_in = 0;
#ifdef HAVE_LIBZ
}
#endif
blkLen = ReadInt(AH);
}
@ -822,11 +824,9 @@ _CloseArchive(ArchiveHandle *AH)
* expect to be doing seeks to read the data back - it may be ok to
* just use the existing self-consistent block formatting.
*/
if (ctx->hasSeek)
{
fseeko(AH->FH, tpos, SEEK_SET);
if (ctx->hasSeek &&
fseeko(AH->FH, tpos, SEEK_SET) == 0)
WriteToc(AH);
}
}
if (fclose(AH->FH) != 0)
@ -835,6 +835,48 @@ _CloseArchive(ArchiveHandle *AH)
AH->FH = NULL;
}
/*
* Reopen the archive's file handle.
*
* We close the original file handle, except on Windows. (The difference
* is because on Windows, this is used within a multithreading context,
* and we don't want a thread closing the parent file handle.)
*/
static void
_ReopenArchive(ArchiveHandle *AH)
{
lclContext *ctx = (lclContext *) AH->formatData;
pgoff_t tpos;
if (AH->mode == archModeWrite)
die_horribly(AH,modulename,"can only reopen input archives\n");
if (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0)
die_horribly(AH,modulename,"cannot reopen stdin\n");
if (!ctx->hasSeek)
die_horribly(AH,modulename,"cannot reopen non-seekable file\n");
errno = 0;
tpos = ftello(AH->FH);
if (errno)
die_horribly(AH, modulename, "could not determine seek position in archive file: %s\n",
strerror(errno));
#ifndef WIN32
if (fclose(AH->FH) != 0)
die_horribly(AH, modulename, "could not close archive file: %s\n",
strerror(errno));
#endif
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
if (!AH->FH)
die_horribly(AH, modulename, "could not open input file \"%s\": %s\n",
AH->fSpec, strerror(errno));
if (fseeko(AH->FH, tpos, SEEK_SET) != 0)
die_horribly(AH, modulename, "could not set seek position in archive file: %s\n",
strerror(errno));
}
/*--------------------------------------------------
* END OF FORMAT CALLBACKS
*--------------------------------------------------
@ -990,7 +1032,6 @@ _DoDeflate(ArchiveHandle *AH, lclContext *ctx, int flush)
/*
* Terminate zlib context and flush it's buffers. If no zlib
* then just return.
*
*/
static void
_EndDataCompressor(ArchiveHandle *AH, TocEntry *te)
@ -1020,3 +1061,44 @@ _EndDataCompressor(ArchiveHandle *AH, TocEntry *te)
/* Send the end marker */
WriteInt(AH, 0);
}
/*
* Clone format-specific fields during parallel restoration.
*/
static void
_Clone(ArchiveHandle *AH)
{
lclContext *ctx = (lclContext *) AH->formatData;
AH->formatData = (lclContext *) malloc(sizeof(lclContext));
if (AH->formatData == NULL)
die_horribly(AH, modulename, "out of memory\n");
memcpy(AH->formatData, ctx, sizeof(lclContext));
ctx = (lclContext *) AH->formatData;
ctx->zp = (z_streamp) malloc(sizeof(z_stream));
ctx->zlibOut = (char *) malloc(zlibOutSize + 1);
ctx->zlibIn = (char *) malloc(ctx->inSize);
if (ctx->zp == NULL || ctx->zlibOut == NULL || ctx->zlibIn == NULL)
die_horribly(AH, modulename, "out of memory\n");
/*
* Note: we do not make a local lo_buf because we expect at most one
* BLOBS entry per archive, so no parallelism is possible. Likewise,
* TOC-entry-local state isn't an issue because any one TOC entry is
* touched by just one worker child.
*/
}
static void
_DeClone(ArchiveHandle *AH)
{
lclContext *ctx = (lclContext *) AH->formatData;
free(ctx->zlibOut);
free(ctx->zlibIn);
free(ctx->zp);
free(ctx);
}
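
Taken together, the three new hooks give each worker a private copy of the
handle state: ClonePtr duplicates the format-specific fields (the zlib buffers
above), ReopenPtr gives the copy an independent file handle positioned where
the parent left off, and DeClonePtr frees it again. The following is an
editor-added sketch with stand-in types; the real call sequence lives in the
archiver code and may differ in detail.

#include <stddef.h>

typedef struct MiniArchiveHandle MiniArchiveHandle;
typedef void (*MiniHookPtr) (MiniArchiveHandle *AH);

struct MiniArchiveHandle
{
	MiniHookPtr ClonePtr;		/* clone format-specific fields */
	MiniHookPtr ReopenPtr;		/* reopen the archive's file handle */
	MiniHookPtr DeClonePtr;		/* clean up cloned fields */
	void	   *formatData;		/* format-private state, e.g. lclContext */
};

/*
 * AH is assumed to be the worker's own shallow copy of the parent handle;
 * the hooks then replace the shared pieces with private ones.
 */
static void
worker_restore_one_item(MiniArchiveHandle *AH)
{
	if (AH->ClonePtr)
		AH->ClonePtr(AH);		/* fresh lclContext, zlib buffers, etc. */
	if (AH->ReopenPtr)
		AH->ReopenPtr(AH);		/* own file handle at the parent's offset */

	/* ... connect to the database and restore one TOC entry here ... */

	if (AH->DeClonePtr)
		AH->DeClonePtr(AH);		/* release the cloned state */
}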


@ -5,7 +5,7 @@
* Implements the basic DB functions used by the archiver.
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.80 2008/08/16 02:25:06 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.81 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -116,29 +116,36 @@ ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
/*
* Connect to the db again.
*
* Note: it's not really all that sensible to use a single-entry password
* cache if the username keeps changing. In current usage, however, the
* username never does change, so one savedPassword is sufficient. We do
* update the cache on the off chance that the password has changed since the
* start of the run.
*/
static PGconn *
_connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
{
PGconn *newConn;
char *newdb;
char *newuser;
char *password = NULL;
const char *newdb;
const char *newuser;
char *password = AH->savedPassword;
bool new_pass;
if (!reqdb)
newdb = PQdb(AH->connection);
else
newdb = (char *) reqdb;
newdb = reqdb;
if (!requser || (strlen(requser) == 0))
if (!requser || strlen(requser) == 0)
newuser = PQuser(AH->connection);
else
newuser = (char *) requser;
newuser = requser;
ahlog(AH, 1, "connecting to database \"%s\" as user \"%s\"\n", newdb, newuser);
ahlog(AH, 1, "connecting to database \"%s\" as user \"%s\"\n",
newdb, newuser);
if (AH->requirePassword)
if (AH->requirePassword && password == NULL)
{
password = simple_prompt("Password: ", 100, false);
if (password == NULL)
@ -170,12 +177,13 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
if (password)
free(password);
password = simple_prompt("Password: ", 100, false);
if (password == NULL)
die_horribly(AH, modulename, "out of memory\n");
new_pass = true;
}
} while (new_pass);
if (password)
free(password);
AH->savedPassword = password;
/* check for version mismatch */
_check_database_version(AH);
@ -190,6 +198,10 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
* Make a database connection with the given parameters. The
* connection handle is returned, the parameters are stored in AHX.
* An interactive password prompt is automatically issued if required.
*
* Note: it's not really all that sensible to use a single-entry password
* cache if the username keeps changing. In current usage, however, the
* username never does change, so one savedPassword is sufficient.
*/
PGconn *
ConnectDatabase(Archive *AHX,
@ -200,21 +212,19 @@ ConnectDatabase(Archive *AHX,
int reqPwd)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
char *password = NULL;
char *password = AH->savedPassword;
bool new_pass;
if (AH->connection)
die_horribly(AH, modulename, "already connected to a database\n");
if (reqPwd)
if (reqPwd && password == NULL)
{
password = simple_prompt("Password: ", 100, false);
if (password == NULL)
die_horribly(AH, modulename, "out of memory\n");
AH->requirePassword = true;
}
else
AH->requirePassword = false;
AH->requirePassword = reqPwd;
/*
* Start the connection. Loop until we have a password if requested by
@ -236,12 +246,13 @@ ConnectDatabase(Archive *AHX,
{
PQfinish(AH->connection);
password = simple_prompt("Password: ", 100, false);
if (password == NULL)
die_horribly(AH, modulename, "out of memory\n");
new_pass = true;
}
} while (new_pass);
if (password)
free(password);
AH->savedPassword = password;
/* check to see that the backend connection was successfully made */
if (PQstatus(AH->connection) == CONNECTION_BAD)


@ -20,7 +20,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.34 2007/10/28 21:55:52 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.35 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -87,6 +87,7 @@ InitArchiveFmt_Files(ArchiveHandle *AH)
AH->WriteBufPtr = _WriteBuf;
AH->ReadBufPtr = _ReadBuf;
AH->ClosePtr = _CloseArchive;
AH->ReopenPtr = NULL;
AH->PrintTocDataPtr = _PrintTocData;
AH->ReadExtraTocPtr = _ReadExtraToc;
AH->WriteExtraTocPtr = _WriteExtraToc;
@ -96,6 +97,8 @@ InitArchiveFmt_Files(ArchiveHandle *AH)
AH->StartBlobPtr = _StartBlob;
AH->EndBlobPtr = _EndBlob;
AH->EndBlobsPtr = _EndBlobs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
/*
* Set up some special context used in compressing data.


@ -17,7 +17,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.19 2006/07/14 14:52:26 momjian Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.20 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -54,12 +54,15 @@ InitArchiveFmt_Null(ArchiveHandle *AH)
AH->WriteBytePtr = _WriteByte;
AH->WriteBufPtr = _WriteBuf;
AH->ClosePtr = _CloseArchive;
AH->ReopenPtr = NULL;
AH->PrintTocDataPtr = _PrintTocData;
AH->StartBlobsPtr = _StartBlobs;
AH->StartBlobPtr = _StartBlob;
AH->EndBlobPtr = _EndBlob;
AH->EndBlobsPtr = _EndBlobs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
/* Initialize LO buffering */
AH->lo_buf_size = LOBBUFSIZE;


@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.62 2007/11/15 21:14:41 momjian Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.63 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -143,6 +143,7 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
AH->WriteBufPtr = _WriteBuf;
AH->ReadBufPtr = _ReadBuf;
AH->ClosePtr = _CloseArchive;
AH->ReopenPtr = NULL;
AH->PrintTocDataPtr = _PrintTocData;
AH->ReadExtraTocPtr = _ReadExtraToc;
AH->WriteExtraTocPtr = _WriteExtraToc;
@ -152,6 +153,8 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
AH->StartBlobPtr = _StartBlob;
AH->EndBlobPtr = _EndBlob;
AH->EndBlobsPtr = _EndBlobs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
/*
* Set up some special context used in compressing data.
@ -1383,5 +1386,4 @@ _tarWriteHeader(TAR_MEMBER *th)
if (fwrite(h, 1, 512, th->tarFH) != 512)
die_horribly(th->AH, modulename, "could not write to output file: %s\n", strerror(errno));
}


@ -12,7 +12,7 @@
* by PostgreSQL
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.518 2009/02/02 19:31:39 alvherre Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.519 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -695,6 +695,7 @@ main(int argc, char **argv)
{
/* Add placeholders to allow correct sorting of blobs */
DumpableObject *blobobj;
DumpableObject *blobcobj;
blobobj = (DumpableObject *) malloc(sizeof(DumpableObject));
blobobj->objType = DO_BLOBS;
@ -702,11 +703,12 @@ main(int argc, char **argv)
AssignDumpId(blobobj);
blobobj->name = strdup("BLOBS");
blobobj = (DumpableObject *) malloc(sizeof(DumpableObject));
blobobj->objType = DO_BLOB_COMMENTS;
blobobj->catId = nilCatalogId;
AssignDumpId(blobobj);
blobobj->name = strdup("BLOB COMMENTS");
blobcobj = (DumpableObject *) malloc(sizeof(DumpableObject));
blobcobj->objType = DO_BLOB_COMMENTS;
blobcobj->catId = nilCatalogId;
AssignDumpId(blobcobj);
blobcobj->name = strdup("BLOB COMMENTS");
addObjectDependency(blobcobj, blobobj->dumpId);
}
/*
@ -1385,11 +1387,10 @@ dumpTableData(Archive *fout, TableDataInfo *tdinfo)
}
ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
tbinfo->dobj.name,
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname, false,
"TABLE DATA", "", "", copyStmt,
tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
NULL, tbinfo->rolname,
false, "TABLE DATA", SECTION_DATA,
"", "", copyStmt,
tdinfo->dobj.dependencies, tdinfo->dobj.nDeps,
dumpFn, tdinfo);
@ -1738,6 +1739,7 @@ dumpDatabase(Archive *AH)
dba, /* Owner */
false, /* with oids */
"DATABASE", /* Desc */
SECTION_PRE_DATA, /* Section */
creaQry->data, /* Create */
delQry->data, /* Del */
NULL, /* Copy */
@ -1764,7 +1766,8 @@ dumpDatabase(Archive *AH)
appendPQExpBuffer(dbQry, ";\n");
ArchiveEntry(AH, dbCatId, createDumpId(), datname, NULL, NULL,
dba, false, "COMMENT", dbQry->data, "", NULL,
dba, false, "COMMENT", SECTION_NONE,
dbQry->data, "", NULL,
&dbDumpId, 1, NULL, NULL);
}
}
@ -1802,7 +1805,8 @@ dumpEncoding(Archive *AH)
ArchiveEntry(AH, nilCatalogId, createDumpId(),
"ENCODING", NULL, NULL, "",
false, "ENCODING", qry->data, "", NULL,
false, "ENCODING", SECTION_PRE_DATA,
qry->data, "", NULL,
NULL, 0,
NULL, NULL);
@ -1828,7 +1832,8 @@ dumpStdStrings(Archive *AH)
ArchiveEntry(AH, nilCatalogId, createDumpId(),
"STDSTRINGS", NULL, NULL, "",
false, "STDSTRINGS", qry->data, "", NULL,
false, "STDSTRINGS", SECTION_PRE_DATA,
qry->data, "", NULL,
NULL, 0,
NULL, NULL);
@ -5514,9 +5519,15 @@ dumpComment(Archive *fout, const char *target,
appendStringLiteralAH(query, comments->descr, fout);
appendPQExpBuffer(query, ";\n");
/*
* We mark comments as SECTION_NONE because they really belong
* in the same section as their parent, whether that is
* pre-data or post-data.
*/
ArchiveEntry(fout, nilCatalogId, createDumpId(),
target, namespace, NULL, owner, false,
"COMMENT", query->data, "", NULL,
target, namespace, NULL, owner,
false, "COMMENT", SECTION_NONE,
query->data, "", NULL,
&(dumpId), 1,
NULL, NULL);
@ -5575,9 +5586,9 @@ dumpTableComment(Archive *fout, TableInfo *tbinfo,
ArchiveEntry(fout, nilCatalogId, createDumpId(),
target->data,
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "COMMENT", query->data, "", NULL,
NULL, tbinfo->rolname,
false, "COMMENT", SECTION_NONE,
query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
}
@ -5597,9 +5608,9 @@ dumpTableComment(Archive *fout, TableInfo *tbinfo,
ArchiveEntry(fout, nilCatalogId, createDumpId(),
target->data,
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "COMMENT", query->data, "", NULL,
NULL, tbinfo->rolname,
false, "COMMENT", SECTION_NONE,
query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
}
@ -5872,15 +5883,17 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj)
case DO_BLOBS:
ArchiveEntry(fout, dobj->catId, dobj->dumpId,
dobj->name, NULL, NULL, "",
false, "BLOBS", "", "", NULL,
NULL, 0,
false, "BLOBS", SECTION_DATA,
"", "", NULL,
dobj->dependencies, dobj->nDeps,
dumpBlobs, NULL);
break;
case DO_BLOB_COMMENTS:
ArchiveEntry(fout, dobj->catId, dobj->dumpId,
dobj->name, NULL, NULL, "",
false, "BLOB COMMENTS", "", "", NULL,
NULL, 0,
false, "BLOB COMMENTS", SECTION_DATA,
"", "", NULL,
dobj->dependencies, dobj->nDeps,
dumpBlobComments, NULL);
break;
}
@ -5918,7 +5931,8 @@ dumpNamespace(Archive *fout, NamespaceInfo *nspinfo)
nspinfo->dobj.name,
NULL, NULL,
nspinfo->rolname,
false, "SCHEMA", q->data, delq->data, NULL,
false, "SCHEMA", SECTION_PRE_DATA,
q->data, delq->data, NULL,
nspinfo->dobj.dependencies, nspinfo->dobj.nDeps,
NULL, NULL);
@ -6021,7 +6035,8 @@ dumpEnumType(Archive *fout, TypeInfo *tinfo)
tinfo->dobj.namespace->dobj.name,
NULL,
tinfo->rolname, false,
"TYPE", q->data, delq->data, NULL,
"TYPE", SECTION_PRE_DATA,
q->data, delq->data, NULL,
tinfo->dobj.dependencies, tinfo->dobj.nDeps,
NULL, NULL);
@ -6389,7 +6404,8 @@ dumpBaseType(Archive *fout, TypeInfo *tinfo)
tinfo->dobj.namespace->dobj.name,
NULL,
tinfo->rolname, false,
"TYPE", q->data, delq->data, NULL,
"TYPE", SECTION_PRE_DATA,
q->data, delq->data, NULL,
tinfo->dobj.dependencies, tinfo->dobj.nDeps,
NULL, NULL);
@ -6507,7 +6523,8 @@ dumpDomain(Archive *fout, TypeInfo *tinfo)
tinfo->dobj.namespace->dobj.name,
NULL,
tinfo->rolname, false,
"DOMAIN", q->data, delq->data, NULL,
"DOMAIN", SECTION_PRE_DATA,
q->data, delq->data, NULL,
tinfo->dobj.dependencies, tinfo->dobj.nDeps,
NULL, NULL);
@ -6600,7 +6617,8 @@ dumpCompositeType(Archive *fout, TypeInfo *tinfo)
tinfo->dobj.namespace->dobj.name,
NULL,
tinfo->rolname, false,
"TYPE", q->data, delq->data, NULL,
"TYPE", SECTION_PRE_DATA,
q->data, delq->data, NULL,
tinfo->dobj.dependencies, tinfo->dobj.nDeps,
NULL, NULL);
@ -6653,7 +6671,8 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
stinfo->dobj.namespace->dobj.name,
NULL,
stinfo->baseType->rolname, false,
"SHELL TYPE", q->data, "", NULL,
"SHELL TYPE", SECTION_PRE_DATA,
q->data, "", NULL,
stinfo->dobj.dependencies, stinfo->dobj.nDeps,
NULL, NULL);
@ -6773,7 +6792,7 @@ dumpProcLang(Archive *fout, ProcLangInfo *plang)
ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
plang->dobj.name,
lanschema, NULL, plang->lanowner,
false, "PROCEDURAL LANGUAGE",
false, "PROCEDURAL LANGUAGE", SECTION_PRE_DATA,
defqry->data, delqry->data, NULL,
plang->dobj.dependencies, plang->dobj.nDeps,
NULL, NULL);
@ -7331,7 +7350,8 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
finfo->dobj.namespace->dobj.name,
NULL,
finfo->rolname, false,
"FUNCTION", q->data, delqry->data, NULL,
"FUNCTION", SECTION_PRE_DATA,
q->data, delqry->data, NULL,
finfo->dobj.dependencies, finfo->dobj.nDeps,
NULL, NULL);
@ -7482,7 +7502,8 @@ dumpCast(Archive *fout, CastInfo *cast)
ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
castsig->data,
"pg_catalog", NULL, "",
false, "CAST", defqry->data, delqry->data, NULL,
false, "CAST", SECTION_PRE_DATA,
defqry->data, delqry->data, NULL,
cast->dobj.dependencies, cast->dobj.nDeps,
NULL, NULL);
@ -7723,7 +7744,8 @@ dumpOpr(Archive *fout, OprInfo *oprinfo)
oprinfo->dobj.namespace->dobj.name,
NULL,
oprinfo->rolname,
false, "OPERATOR", q->data, delq->data, NULL,
false, "OPERATOR", SECTION_PRE_DATA,
q->data, delq->data, NULL,
oprinfo->dobj.dependencies, oprinfo->dobj.nDeps,
NULL, NULL);
@ -8175,7 +8197,8 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
opcinfo->dobj.namespace->dobj.name,
NULL,
opcinfo->rolname,
false, "OPERATOR CLASS", q->data, delq->data, NULL,
false, "OPERATOR CLASS", SECTION_PRE_DATA,
q->data, delq->data, NULL,
opcinfo->dobj.dependencies, opcinfo->dobj.nDeps,
NULL, NULL);
@ -8451,7 +8474,8 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
opfinfo->dobj.namespace->dobj.name,
NULL,
opfinfo->rolname,
false, "OPERATOR FAMILY", q->data, delq->data, NULL,
false, "OPERATOR FAMILY", SECTION_PRE_DATA,
q->data, delq->data, NULL,
opfinfo->dobj.dependencies, opfinfo->dobj.nDeps,
NULL, NULL);
@ -8564,7 +8588,8 @@ dumpConversion(Archive *fout, ConvInfo *convinfo)
convinfo->dobj.namespace->dobj.name,
NULL,
convinfo->rolname,
false, "CONVERSION", q->data, delq->data, NULL,
false, "CONVERSION", SECTION_PRE_DATA,
q->data, delq->data, NULL,
convinfo->dobj.dependencies, convinfo->dobj.nDeps,
NULL, NULL);
@ -8805,7 +8830,8 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
agginfo->aggfn.dobj.namespace->dobj.name,
NULL,
agginfo->aggfn.rolname,
false, "AGGREGATE", q->data, delq->data, NULL,
false, "AGGREGATE", SECTION_PRE_DATA,
q->data, delq->data, NULL,
agginfo->aggfn.dobj.dependencies, agginfo->aggfn.dobj.nDeps,
NULL, NULL);
@ -8892,7 +8918,8 @@ dumpTSParser(Archive *fout, TSParserInfo *prsinfo)
prsinfo->dobj.namespace->dobj.name,
NULL,
"",
false, "TEXT SEARCH PARSER", q->data, delq->data, NULL,
false, "TEXT SEARCH PARSER", SECTION_PRE_DATA,
q->data, delq->data, NULL,
prsinfo->dobj.dependencies, prsinfo->dobj.nDeps,
NULL, NULL);
@ -8981,7 +9008,8 @@ dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo)
dictinfo->dobj.namespace->dobj.name,
NULL,
dictinfo->rolname,
false, "TEXT SEARCH DICTIONARY", q->data, delq->data, NULL,
false, "TEXT SEARCH DICTIONARY", SECTION_PRE_DATA,
q->data, delq->data, NULL,
dictinfo->dobj.dependencies, dictinfo->dobj.nDeps,
NULL, NULL);
@ -9040,7 +9068,8 @@ dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo)
tmplinfo->dobj.namespace->dobj.name,
NULL,
"",
false, "TEXT SEARCH TEMPLATE", q->data, delq->data, NULL,
false, "TEXT SEARCH TEMPLATE", SECTION_PRE_DATA,
q->data, delq->data, NULL,
tmplinfo->dobj.dependencies, tmplinfo->dobj.nDeps,
NULL, NULL);
@ -9170,7 +9199,8 @@ dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo)
cfginfo->dobj.namespace->dobj.name,
NULL,
cfginfo->rolname,
false, "TEXT SEARCH CONFIGURATION", q->data, delq->data, NULL,
false, "TEXT SEARCH CONFIGURATION", SECTION_PRE_DATA,
q->data, delq->data, NULL,
cfginfo->dobj.dependencies, cfginfo->dobj.nDeps,
NULL, NULL);
@ -9220,7 +9250,8 @@ dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo)
NULL,
NULL,
fdwinfo->rolname,
false, "FOREIGN DATA WRAPPER", q->data, delq->data, NULL,
false, "FOREIGN DATA WRAPPER", SECTION_PRE_DATA,
q->data, delq->data, NULL,
fdwinfo->dobj.dependencies, fdwinfo->dobj.nDeps,
NULL, NULL);
@ -9298,7 +9329,8 @@ dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo)
NULL,
NULL,
srvinfo->rolname,
false, "SERVER", q->data, delq->data, NULL,
false, "SERVER", SECTION_PRE_DATA,
q->data, delq->data, NULL,
srvinfo->dobj.dependencies, srvinfo->dobj.nDeps,
NULL, NULL);
@ -9393,8 +9425,8 @@ dumpUserMappings(Archive *fout, const char *target,
namespace,
NULL,
owner, false,
"USER MAPPING", q->data,
delq->data, NULL,
"USER MAPPING", SECTION_PRE_DATA,
q->data, delq->data, NULL,
&dumpId, 1,
NULL, NULL);
}
@ -9447,7 +9479,8 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
tag, nspname,
NULL,
owner ? owner : "",
false, "ACL", sql->data, "", NULL,
false, "ACL", SECTION_NONE,
sql->data, "", NULL,
&(objDumpId), 1,
NULL, NULL);
@ -9797,7 +9830,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
(tbinfo->relkind == RELKIND_VIEW) ? NULL : tbinfo->reltablespace,
tbinfo->rolname,
(strcmp(reltypename, "TABLE") == 0) ? tbinfo->hasoids : false,
reltypename, q->data, delq->data, NULL,
reltypename, SECTION_PRE_DATA,
q->data, delq->data, NULL,
tbinfo->dobj.dependencies, tbinfo->dobj.nDeps,
NULL, NULL);
@ -9863,7 +9897,8 @@ dumpAttrDef(Archive *fout, AttrDefInfo *adinfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "DEFAULT", q->data, delq->data, NULL,
false, "DEFAULT", SECTION_PRE_DATA,
q->data, delq->data, NULL,
adinfo->dobj.dependencies, adinfo->dobj.nDeps,
NULL, NULL);
@ -9956,7 +9991,8 @@ dumpIndex(Archive *fout, IndxInfo *indxinfo)
tbinfo->dobj.namespace->dobj.name,
indxinfo->tablespace,
tbinfo->rolname, false,
"INDEX", q->data, delq->data, NULL,
"INDEX", SECTION_POST_DATA,
q->data, delq->data, NULL,
indxinfo->dobj.dependencies, indxinfo->dobj.nDeps,
NULL, NULL);
}
@ -10059,7 +10095,8 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
tbinfo->dobj.namespace->dobj.name,
indxinfo->tablespace,
tbinfo->rolname, false,
"CONSTRAINT", q->data, delq->data, NULL,
"CONSTRAINT", SECTION_POST_DATA,
q->data, delq->data, NULL,
coninfo->dobj.dependencies, coninfo->dobj.nDeps,
NULL, NULL);
}
@ -10091,7 +10128,8 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname, false,
"FK CONSTRAINT", q->data, delq->data, NULL,
"FK CONSTRAINT", SECTION_POST_DATA,
q->data, delq->data, NULL,
coninfo->dobj.dependencies, coninfo->dobj.nDeps,
NULL, NULL);
}
@ -10125,7 +10163,8 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname, false,
"CHECK CONSTRAINT", q->data, delq->data, NULL,
"CHECK CONSTRAINT", SECTION_POST_DATA,
q->data, delq->data, NULL,
coninfo->dobj.dependencies, coninfo->dobj.nDeps,
NULL, NULL);
}
@ -10160,7 +10199,8 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
tinfo->dobj.namespace->dobj.name,
NULL,
tinfo->rolname, false,
"CHECK CONSTRAINT", q->data, delq->data, NULL,
"CHECK CONSTRAINT", SECTION_POST_DATA,
q->data, delq->data, NULL,
coninfo->dobj.dependencies, coninfo->dobj.nDeps,
NULL, NULL);
}
@ -10433,7 +10473,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "SEQUENCE", query->data, delqry->data, NULL,
false, "SEQUENCE", SECTION_PRE_DATA,
query->data, delqry->data, NULL,
tbinfo->dobj.dependencies, tbinfo->dobj.nDeps,
NULL, NULL);
@ -10468,7 +10509,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "SEQUENCE OWNED BY", query->data, "", NULL,
false, "SEQUENCE OWNED BY", SECTION_PRE_DATA,
query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
}
@ -10495,7 +10537,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "SEQUENCE SET", query->data, "", NULL,
false, "SEQUENCE SET", SECTION_PRE_DATA,
query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
}
@ -10691,7 +10734,8 @@ dumpTrigger(Archive *fout, TriggerInfo *tginfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname, false,
"TRIGGER", query->data, delqry->data, NULL,
"TRIGGER", SECTION_POST_DATA,
query->data, delqry->data, NULL,
tginfo->dobj.dependencies, tginfo->dobj.nDeps,
NULL, NULL);
@ -10810,7 +10854,8 @@ dumpRule(Archive *fout, RuleInfo *rinfo)
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname, false,
"RULE", cmd->data, delcmd->data, NULL,
"RULE", SECTION_POST_DATA,
cmd->data, delcmd->data, NULL,
rinfo->dobj.dependencies, rinfo->dobj.nDeps,
NULL, NULL);


@ -34,7 +34,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.91 2009/01/06 17:18:11 momjian Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.92 2009/02/02 20:07:37 adunstan Exp $
*
*-------------------------------------------------------------------------
*/
@ -93,6 +93,7 @@ main(int argc, char **argv)
{"ignore-version", 0, NULL, 'i'},
{"index", 1, NULL, 'I'},
{"list", 0, NULL, 'l'},
{"multi-thread", 1, NULL, 'm'},
{"no-privileges", 0, NULL, 'x'},
{"no-acl", 0, NULL, 'x'},
{"no-owner", 0, NULL, 'O'},
@ -141,7 +142,7 @@ main(int argc, char **argv)
}
}
while ((c = getopt_long(argc, argv, "acCd:ef:F:h:iI:lL:n:Op:P:RsS:t:T:U:vWxX:1",
while ((c = getopt_long(argc, argv, "acCd:ef:F:h:iI:lL:m:n:Op:P:RsS:t:T:U:vWxX:1",
cmdopts, NULL)) != -1)
{
switch (c)
@ -184,6 +185,10 @@ main(int argc, char **argv)
opts->tocFile = strdup(optarg);
break;
case 'm': /* number of restore threads */
opts->number_of_threads = atoi(optarg);
break;
case 'n': /* Dump data for this schema only */
opts->schemaNames = strdup(optarg);
break;
@ -269,7 +274,10 @@ main(int argc, char **argv)
break;
case 0:
/* This covers the long options equivalent to -X xxx. */
/*
* This covers the long options without a short equivalent,
* including those equivalent to -X xxx.
*/
break;
case 2: /* SET ROLE */
@ -301,6 +309,14 @@ main(int argc, char **argv)
opts->useDB = 1;
}
/* Can't do single-txn mode with multiple connections */
if (opts->single_txn && opts->number_of_threads > 1)
{
fprintf(stderr, _("%s: cannot specify both --single-transaction and multiple threads\n"),
progname);
exit(1);
}
opts->disable_triggers = disable_triggers;
opts->noDataForFailedTables = no_data_for_failed_tables;
opts->noTablespace = outputNoTablespaces;
@ -308,10 +324,8 @@ main(int argc, char **argv)
if (opts->formatName)
{
switch (opts->formatName[0])
{
case 'c':
case 'C':
opts->format = archCustom;
@ -396,6 +410,7 @@ usage(const char *progname)
printf(_(" -I, --index=NAME restore named index\n"));
printf(_(" -L, --use-list=FILENAME use specified table of contents for ordering\n"
" output from this file\n"));
printf(_(" -m, --multi-thread=NUM use this many parallel connections to restore\n"));
printf(_(" -n, --schema=NAME restore only objects in this schema\n"));
printf(_(" -O, --no-owner skip restoration of object ownership\n"));
printf(_(" -P, --function=NAME(args)\n"