2000-07-04 16:25:28 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* pg_backup_archiver.c
|
|
|
|
*
|
|
|
|
* Private implementation of the archiver routines.
|
|
|
|
*
|
|
|
|
* See the headers to pg_restore for more details.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2000, Philip Warner
|
|
|
|
* Rights are granted to use this software in any way so long
|
2001-03-22 05:01:46 +01:00
|
|
|
* as this notice is not removed.
|
2000-07-04 16:25:28 +02:00
|
|
|
*
|
|
|
|
* The author is not responsible for loss or damages that may
|
2002-08-10 18:57:32 +02:00
|
|
|
* result from its use.
|
2000-07-04 16:25:28 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/bin/pg_dump/pg_backup_archiver.c
|
2002-01-18 18:13:51 +01:00
|
|
|
*
|
2000-07-04 16:25:28 +02:00
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
#include "pg_backup_db.h"
|
2013-03-27 17:10:40 +01:00
|
|
|
#include "pg_backup_utils.h"
|
2013-03-24 16:27:20 +01:00
|
|
|
#include "parallel.h"
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2002-02-11 01:18:20 +01:00
|
|
|
#include <ctype.h>
|
2013-03-24 16:27:20 +01:00
|
|
|
#include <fcntl.h>
|
2002-08-27 20:57:26 +02:00
|
|
|
#include <unistd.h>
|
2011-01-23 22:10:15 +01:00
|
|
|
#include <sys/stat.h>
|
2009-02-02 21:07:37 +01:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/wait.h>
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2005-01-26 20:44:43 +01:00
|
|
|
#ifdef WIN32
|
|
|
|
#include <io.h>
|
|
|
|
#endif
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
#include "libpq/libpq-fs.h"
|
2002-08-18 11:36:26 +02:00
|
|
|
|
2012-01-03 22:02:49 +01:00
|
|
|
#define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
|
|
|
|
#define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
|
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
/* state needed to save/restore an archive's output target */
|
|
|
|
typedef struct _outputContext
{
	void	   *OF;			/* output stream handle; closed with GZCLOSE when
							 * gzOut is set, with fclose otherwise (see
							 * CloseArchive) -- i.e. gzFile or FILE * */
	int			gzOut;		/* nonzero => OF is compressed (zlib) output */
} OutputContext;
|
|
|
|
|
2012-07-25 06:02:49 +02:00
|
|
|
/* translator: this is a module name */
|
2007-03-18 17:50:44 +01:00
|
|
|
static const char *modulename = gettext_noop("archiver");
|
2003-12-06 04:00:16 +01:00
|
|
|
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
|
2013-03-24 16:27:20 +01:00
|
|
|
const int compression, ArchiveMode mode, SetupWorkerPtr setupWorkerPtr);
|
2005-01-23 01:03:54 +01:00
|
|
|
static void _getObjectDescription(PQExpBuffer buf, TocEntry *te,
|
2005-10-15 04:49:52 +02:00
|
|
|
ArchiveHandle *AH);
|
2004-08-13 23:37:28 +02:00
|
|
|
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass);
|
2012-02-23 21:53:09 +01:00
|
|
|
static char *replace_line_endings(const char *str);
|
2004-02-24 04:35:19 +01:00
|
|
|
static void _doSetFixedOutputState(ArchiveHandle *AH);
|
2002-08-18 11:36:26 +02:00
|
|
|
static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
|
2004-03-24 04:06:08 +01:00
|
|
|
static void _doSetWithOids(ArchiveHandle *AH, const bool withOids);
|
2004-09-10 22:05:18 +02:00
|
|
|
static void _reconnectToDB(ArchiveHandle *AH, const char *dbname);
|
2003-09-24 00:48:53 +02:00
|
|
|
static void _becomeUser(ArchiveHandle *AH, const char *user);
|
|
|
|
static void _becomeOwner(ArchiveHandle *AH, TocEntry *te);
|
2002-05-11 00:36:27 +02:00
|
|
|
static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName);
|
2004-11-06 20:36:02 +01:00
|
|
|
static void _selectTablespace(ArchiveHandle *AH, const char *tablespace);
|
2006-05-28 23:13:54 +02:00
|
|
|
static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void processStdStringsEntry(ArchiveHandle *AH, TocEntry *te);
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
static teReqs _tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt);
|
2010-02-18 02:29:10 +01:00
|
|
|
static bool _tocEntryIsACL(TocEntry *te);
|
2001-03-22 05:01:46 +01:00
|
|
|
static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
|
|
static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
2012-05-29 02:38:28 +02:00
|
|
|
static void buildTocEntryArrays(ArchiveHandle *AH);
|
2010-08-21 15:59:44 +02:00
|
|
|
static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te);
|
2001-03-22 05:01:46 +01:00
|
|
|
static int _discoverArchiveFormat(ArchiveHandle *AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
static int RestoringToDB(ArchiveHandle *AH);
|
2005-06-21 22:45:44 +02:00
|
|
|
static void dump_lo_buf(ArchiveHandle *AH);
|
2005-04-15 18:40:36 +02:00
|
|
|
static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
|
2012-02-07 22:20:29 +01:00
|
|
|
static void SetOutput(ArchiveHandle *AH, const char *filename, int compression);
|
2011-01-22 23:56:42 +01:00
|
|
|
static OutputContext SaveOutput(ArchiveHandle *AH);
|
|
|
|
static void RestoreOutput(ArchiveHandle *AH, OutputContext savedContext);
|
2005-04-15 18:40:36 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
|
2009-06-11 16:49:15 +02:00
|
|
|
RestoreOptions *ropt, bool is_parallel);
|
2013-03-24 16:27:20 +01:00
|
|
|
static void restore_toc_entries_prefork(ArchiveHandle *AH);
|
|
|
|
static void restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
|
|
|
|
TocEntry *pending_list);
|
|
|
|
static void restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list);
|
2009-08-08 00:48:34 +02:00
|
|
|
static void par_list_header_init(TocEntry *l);
|
|
|
|
static void par_list_append(TocEntry *l, TocEntry *te);
|
|
|
|
static void par_list_remove(TocEntry *te);
|
2009-02-02 21:07:37 +01:00
|
|
|
static TocEntry *get_next_work_item(ArchiveHandle *AH,
|
2009-08-08 00:48:34 +02:00
|
|
|
TocEntry *ready_list,
|
2013-03-24 16:27:20 +01:00
|
|
|
ParallelState *pstate);
|
2009-08-08 00:48:34 +02:00
|
|
|
static void mark_work_done(ArchiveHandle *AH, TocEntry *ready_list,
|
2013-03-24 16:27:20 +01:00
|
|
|
int worker, int status,
|
|
|
|
ParallelState *pstate);
|
2009-02-02 21:07:37 +01:00
|
|
|
static void fix_dependencies(ArchiveHandle *AH);
|
2009-04-12 23:02:44 +02:00
|
|
|
static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2);
|
2012-05-29 02:38:28 +02:00
|
|
|
static void repoint_table_dependencies(ArchiveHandle *AH);
|
|
|
|
static void identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te);
|
2009-08-08 00:48:34 +02:00
|
|
|
static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
|
2010-02-26 03:01:40 +01:00
|
|
|
TocEntry *ready_list);
|
2009-02-02 21:07:37 +01:00
|
|
|
static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te);
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/*
|
2001-03-22 05:01:46 +01:00
|
|
|
* Wrapper functions.
|
|
|
|
*
|
|
|
|
 * The objective is to make writing new formats and dumpers as simple
|
|
|
|
* as possible, if necessary at the expense of extra function calls etc.
|
2000-07-04 16:25:28 +02:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
|
|
|
|
* The dump worker setup needs lots of knowledge of the internals of pg_dump,
|
|
|
|
 * so it's defined in pg_dump.c and passed into CreateArchive. The restore worker
|
|
|
|
* setup doesn't need to know anything much, so it's defined here.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
setupRestoreWorker(Archive *AHX, RestoreOptions *ropt)
|
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
|
|
|
(AH->ReopenPtr) (AH);
|
|
|
|
}
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
/* Create a new archive */
|
|
|
|
/* Public */
|
2001-10-25 07:50:21 +02:00
|
|
|
Archive *
|
2001-03-22 05:01:46 +01:00
|
|
|
CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
|
2013-03-24 16:27:20 +01:00
|
|
|
const int compression, ArchiveMode mode, SetupWorkerPtr setupDumpWorker)
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2013-03-24 16:27:20 +01:00
|
|
|
ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression, mode, setupDumpWorker);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
|
|
|
return (Archive *) AH;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Open an existing archive */
|
|
|
|
/* Public */
|
2001-10-25 07:50:21 +02:00
|
|
|
Archive *
|
2001-03-22 05:01:46 +01:00
|
|
|
OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2013-03-24 16:27:20 +01:00
|
|
|
ArchiveHandle *AH = _allocAH(FileSpec, fmt, 0, archModeRead, setupRestoreWorker);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
|
|
|
return (Archive *) AH;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
|
|
|
CloseArchive(Archive *AHX)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
int res = 0;
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
|
|
|
(*AH->ClosePtr) (AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/* Close the output */
|
|
|
|
if (AH->gzOut)
|
2001-01-12 05:32:07 +01:00
|
|
|
res = GZCLOSE(AH->OF);
|
2001-03-22 05:01:46 +01:00
|
|
|
else if (AH->OF != stdout)
|
2001-01-12 05:32:07 +01:00
|
|
|
res = fclose(AH->OF);
|
|
|
|
|
|
|
|
if (res != 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "could not close output file: %s\n",
|
|
|
|
strerror(errno));
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * Store the restore options in the archive handle, and compute once, for
 * every TOC entry, whether it is wanted (schema and/or data), caching the
 * verdict in te->reqs.  Section order is tracked as we scan so that
 * _tocEntryRequired() can apply --section filtering.
 */
void
SetArchiveRestoreOptions(Archive *AHX, RestoreOptions *ropt)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	TocEntry   *te;
	teSection	curSection;

	/* Save options for later access */
	AH->ropt = ropt;

	/* Decide which TOC entries will be dumped/restored, and mark them */
	curSection = SECTION_PRE_DATA;
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		/*
		 * When writing an archive, we also take this opportunity to check
		 * that we have generated the entries in a sane order that respects
		 * the section divisions.  When reading, don't complain, since buggy
		 * old versions of pg_dump might generate out-of-order archives.
		 */
		if (AH->mode != archModeRead)
		{
			switch (te->section)
			{
				case SECTION_NONE:
					/* ok to be anywhere */
					break;
				case SECTION_PRE_DATA:
					/* pre-data may not follow data or post-data entries */
					if (curSection != SECTION_PRE_DATA)
						write_msg(modulename,
								  "WARNING: archive items not in correct section order\n");
					break;
				case SECTION_DATA:
					/* data may follow pre-data or data, but not post-data */
					if (curSection == SECTION_POST_DATA)
						write_msg(modulename,
								  "WARNING: archive items not in correct section order\n");
					break;
				case SECTION_POST_DATA:
					/* ok no matter which section we were in */
					break;
				default:
					exit_horribly(modulename, "unexpected section code %d\n",
								  (int) te->section);
					break;
			}
		}

		/* SECTION_NONE entries do not advance the current section */
		if (te->section != SECTION_NONE)
			curSection = te->section;

		/* Evaluate and cache the filtering decision for this entry */
		te->reqs = _tocEntryRequired(te, curSection, ropt);
	}
}
|
|
|
|
|
|
|
|
/* Public */
|
|
|
|
void
|
|
|
|
RestoreArchive(Archive *AHX)
|
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
RestoreOptions *ropt = AH->ropt;
|
2011-08-29 04:27:48 +02:00
|
|
|
bool parallel_mode;
|
2005-04-15 18:40:36 +02:00
|
|
|
TocEntry *te;
|
2001-03-22 05:01:46 +01:00
|
|
|
OutputContext sav;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->stage = STAGE_INITIALIZING;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-10-23 23:26:44 +02:00
|
|
|
/*
|
|
|
|
* Check for nonsensical option combinations.
|
|
|
|
*
|
2010-05-15 23:41:16 +02:00
|
|
|
* -C is not compatible with -1, because we can't create a database inside
|
2009-06-11 16:49:15 +02:00
|
|
|
* a transaction block.
|
2009-01-13 12:44:56 +01:00
|
|
|
*/
|
2010-05-15 23:41:16 +02:00
|
|
|
if (ropt->createDB && ropt->single_txn)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "-C and -1 are incompatible options\n");
|
2001-10-23 23:26:44 +02:00
|
|
|
|
2011-08-29 04:27:48 +02:00
|
|
|
/*
|
|
|
|
* If we're going to do parallel restore, there are some restrictions.
|
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
|
2011-08-29 04:27:48 +02:00
|
|
|
if (parallel_mode)
|
|
|
|
{
|
|
|
|
/* We haven't got round to making this work for all archive formats */
|
|
|
|
if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "parallel restore is not supported with this archive file format\n");
|
2011-08-29 04:27:48 +02:00
|
|
|
|
|
|
|
/* Doesn't work if the archive represents dependencies as OIDs */
|
|
|
|
if (AH->version < K_VERS_1_8)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "parallel restore is not supported with archives made by pre-8.0 pg_dump\n");
|
2011-08-29 04:27:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* It's also not gonna work if we can't reopen the input file, so
|
|
|
|
* let's try that immediately.
|
|
|
|
*/
|
|
|
|
(AH->ReopenPtr) (AH);
|
|
|
|
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* Make sure we won't need (de)compression we haven't got
|
|
|
|
*/
|
|
|
|
#ifndef HAVE_LIBZ
|
2009-06-11 16:49:15 +02:00
|
|
|
if (AH->compression != 0 && AH->PrintTocDataPtr !=NULL)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "cannot restore from compressed archive (compression not supported in this installation)\n");
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
/*
|
|
|
|
* Prepare index arrays, so we can assume we have them throughout restore.
|
|
|
|
* It's possible we already did this, though.
|
|
|
|
*/
|
|
|
|
if (AH->tocsByDumpId == NULL)
|
|
|
|
buildTocEntryArrays(AH);
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* If we're using a DB connection, then connect it.
|
|
|
|
*/
|
|
|
|
if (ropt->useDB)
|
|
|
|
{
|
2001-07-03 22:21:50 +02:00
|
|
|
ahlog(AH, 1, "connecting to database for restore\n");
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version < K_VERS_1_3)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "direct database connections are not supported in pre-1.3 archives\n");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2013-12-29 22:17:52 +01:00
|
|
|
/*
|
|
|
|
* We don't want to guess at whether the dump will successfully
|
|
|
|
* restore; allow the attempt regardless of the version of the restore
|
|
|
|
* target.
|
|
|
|
*/
|
|
|
|
AHX->minRemoteVersion = 0;
|
2001-04-25 09:03:20 +02:00
|
|
|
AHX->maxRemoteVersion = 999999;
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
ConnectDatabase(AHX, ropt->dbname,
|
|
|
|
ropt->pghost, ropt->pgport, ropt->username,
|
2009-02-26 17:02:39 +01:00
|
|
|
ropt->promptPassword);
|
2004-08-29 07:07:03 +02:00
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If we're talking to the DB directly, don't send comments since they
|
|
|
|
* obscure SQL when displaying errors
|
2004-08-29 07:07:03 +02:00
|
|
|
*/
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->noTocComments = 1;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
2001-03-06 05:08:04 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Work out if we have an implied data-only restore. This can happen if
|
|
|
|
* the dump was data only or if the user has used a toc list to exclude
|
|
|
|
* all of the schema data. All we do is look for schema entries - if none
|
|
|
|
* are found then we set the dataOnly flag.
|
2001-03-06 05:08:04 +01:00
|
|
|
*
|
2001-03-22 05:01:46 +01:00
|
|
|
* We could scan for wanted TABLE entries, but that is not the same as
|
2001-03-06 05:08:04 +01:00
|
|
|
* dataOnly. At this stage, it seems unnecessary (6-Mar-2001).
|
2001-03-22 05:01:46 +01:00
|
|
|
*/
|
|
|
|
if (!ropt->dataOnly)
|
|
|
|
{
|
2005-10-15 04:49:52 +02:00
|
|
|
int impliedDataOnly = 1;
|
2005-01-25 23:44:31 +01:00
|
|
|
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if ((te->reqs & REQ_SCHEMA) != 0)
|
2001-03-22 05:01:46 +01:00
|
|
|
{ /* It's schema, and it's wanted */
|
2001-03-06 05:08:04 +01:00
|
|
|
impliedDataOnly = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (impliedDataOnly)
|
|
|
|
{
|
|
|
|
ropt->dataOnly = impliedDataOnly;
|
2001-07-03 22:21:50 +02:00
|
|
|
ahlog(AH, 1, "implied data-only restore\n");
|
2001-03-06 05:08:04 +01:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2001-03-06 05:08:04 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
2001-03-22 05:01:46 +01:00
|
|
|
* Setup the output file if necessary.
|
2007-11-15 22:14:46 +01:00
|
|
|
*/
|
2011-01-22 23:56:42 +01:00
|
|
|
sav = SaveOutput(AH);
|
2001-03-22 05:01:46 +01:00
|
|
|
if (ropt->filename || ropt->compression)
|
2011-01-22 23:56:42 +01:00
|
|
|
SetOutput(AH, ropt->filename, ropt->compression);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2002-08-18 11:36:26 +02:00
|
|
|
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2010-02-23 22:48:32 +01:00
|
|
|
if (AH->public.verbose)
|
|
|
|
{
|
2010-02-24 03:42:55 +01:00
|
|
|
if (AH->archiveRemoteVersion)
|
|
|
|
ahprintf(AH, "-- Dumped from database version %s\n",
|
|
|
|
AH->archiveRemoteVersion);
|
|
|
|
if (AH->archiveDumpVersion)
|
|
|
|
ahprintf(AH, "-- Dumped by pg_dump version %s\n",
|
|
|
|
AH->archiveDumpVersion);
|
2005-04-15 18:40:36 +02:00
|
|
|
dumpTimestamp(AH, "Started on", AH->createDate);
|
2010-02-24 03:42:55 +01:00
|
|
|
}
|
2005-04-15 18:40:36 +02:00
|
|
|
|
2006-02-13 22:30:19 +01:00
|
|
|
if (ropt->single_txn)
|
2006-02-15 00:30:43 +01:00
|
|
|
{
|
|
|
|
if (AH->connection)
|
|
|
|
StartTransaction(AH);
|
|
|
|
else
|
|
|
|
ahprintf(AH, "BEGIN;\n\n");
|
|
|
|
}
|
2006-02-13 22:30:19 +01:00
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/*
|
|
|
|
* Establish important parameter values right away.
|
|
|
|
*/
|
|
|
|
_doSetFixedOutputState(AH);
|
|
|
|
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->stage = STAGE_PROCESSING;
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
|
|
|
|
* Drop the items at the start, in reverse order
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
if (ropt->dropSchema)
|
|
|
|
{
|
2005-04-15 18:40:36 +02:00
|
|
|
for (te = AH->toc->prev; te != AH->toc; te = te->prev)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
2005-04-15 18:40:36 +02:00
|
|
|
AH->currentTE = te;
|
|
|
|
|
2012-10-20 22:58:32 +02:00
|
|
|
/*
|
|
|
|
* In createDB mode, issue a DROP *only* for the database as a
|
|
|
|
* whole. Issuing drops against anything else would be wrong,
|
|
|
|
* because at this point we're connected to the wrong database.
|
|
|
|
* Conversely, if we're not in createDB mode, we'd better not
|
|
|
|
* issue a DROP against the database at all.
|
|
|
|
*/
|
|
|
|
if (ropt->createDB)
|
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "DATABASE") != 0)
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "DATABASE") == 0)
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Otherwise, drop anything that's selected and has a dropStmt */
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
|
2001-07-03 22:21:50 +02:00
|
|
|
{
|
2002-07-04 17:35:07 +02:00
|
|
|
ahlog(AH, 1, "dropping %s %s\n", te->desc, te->tag);
|
2002-05-11 00:36:27 +02:00
|
|
|
/* Select owner and schema as necessary */
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeOwner(AH, te);
|
2002-05-11 00:36:27 +02:00
|
|
|
_selectOutputSchema(AH, te->namespace);
|
2014-03-03 19:02:18 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now emit the DROP command, if the object has one. Note we
|
|
|
|
* don't necessarily emit it verbatim; at this point we add an
|
|
|
|
* appropriate IF EXISTS clause, if the user requested it.
|
|
|
|
*/
|
|
|
|
if (*te->dropStmt != '\0')
|
|
|
|
{
|
|
|
|
if (!ropt->if_exists)
|
|
|
|
{
|
2014-05-06 18:12:18 +02:00
|
|
|
/* No --if-exists? Then just use the original */
|
2014-03-03 19:02:18 +01:00
|
|
|
ahprintf(AH, "%s", te->dropStmt);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
char buffer[40];
|
|
|
|
char *mark;
|
|
|
|
char *dropStmt = pg_strdup(te->dropStmt);
|
|
|
|
char *dropStmtPtr = dropStmt;
|
2014-05-06 18:12:18 +02:00
|
|
|
PQExpBuffer ftStmt = createPQExpBuffer();
|
2014-03-03 19:02:18 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Need to inject IF EXISTS clause after ALTER TABLE
|
|
|
|
* part in ALTER TABLE .. DROP statement
|
|
|
|
*/
|
|
|
|
if (strncmp(dropStmt, "ALTER TABLE", 11) == 0)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(ftStmt,
|
|
|
|
"ALTER TABLE IF EXISTS");
|
|
|
|
dropStmt = dropStmt + 11;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ALTER TABLE..ALTER COLUMN..DROP DEFAULT does not
|
|
|
|
* support the IF EXISTS clause, and therefore we
|
|
|
|
* simply emit the original command for such objects.
|
2014-05-06 18:12:18 +02:00
|
|
|
* For other objects, we need to extract the first
|
|
|
|
* part of the DROP which includes the object type.
|
|
|
|
* Most of the time this matches te->desc, so search
|
|
|
|
* for that; however for the different kinds of
|
|
|
|
* CONSTRAINTs, we know to search for hardcoded "DROP
|
|
|
|
* CONSTRAINT" instead.
|
2014-03-03 19:02:18 +01:00
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "DEFAULT") == 0)
|
|
|
|
appendPQExpBuffer(ftStmt, "%s", dropStmt);
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0)
|
|
|
|
strcpy(buffer, "DROP CONSTRAINT");
|
|
|
|
else
|
|
|
|
snprintf(buffer, sizeof(buffer), "DROP %s",
|
|
|
|
te->desc);
|
|
|
|
|
|
|
|
mark = strstr(dropStmt, buffer);
|
|
|
|
Assert(mark != NULL);
|
|
|
|
|
|
|
|
*mark = '\0';
|
|
|
|
appendPQExpBuffer(ftStmt, "%s%s IF EXISTS%s",
|
|
|
|
dropStmt, buffer,
|
|
|
|
mark + strlen(buffer));
|
|
|
|
}
|
|
|
|
|
|
|
|
ahprintf(AH, "%s", ftStmt->data);
|
|
|
|
|
|
|
|
destroyPQExpBuffer(ftStmt);
|
|
|
|
|
|
|
|
pg_free(dropStmtPtr);
|
|
|
|
}
|
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
}
|
2007-11-24 21:26:49 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* _selectOutputSchema may have set currSchema to reflect the effect
|
|
|
|
* of a "SET search_path" command it emitted. However, by now we may
|
|
|
|
* have dropped that schema; or it might not have existed in the first
|
|
|
|
* place. In either case the effective value of search_path will not
|
|
|
|
* be what we think. Forcibly reset currSchema so that we will
|
|
|
|
* re-establish the search_path setting when needed (after creating
|
|
|
|
* the schema).
|
|
|
|
*
|
|
|
|
* If we treated users as pg_dump'able objects then we'd need to reset
|
|
|
|
* currUser here too.
|
|
|
|
*/
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currSchema = NULL;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
2009-02-02 21:07:37 +01:00
|
|
|
* In serial mode, we now process each non-ACL TOC entry.
|
|
|
|
*
|
|
|
|
* In parallel mode, turn control over to the parallel-restore logic.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
2011-08-29 04:27:48 +02:00
|
|
|
if (parallel_mode)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
|
|
|
ParallelState *pstate;
|
|
|
|
TocEntry pending_list;
|
|
|
|
|
|
|
|
par_list_header_init(&pending_list);
|
|
|
|
|
|
|
|
/* This runs PRE_DATA items and then disconnects from the database */
|
|
|
|
restore_toc_entries_prefork(AH);
|
|
|
|
Assert(AH->connection == NULL);
|
|
|
|
|
|
|
|
/* ParallelBackupStart() will actually fork the processes */
|
|
|
|
pstate = ParallelBackupStart(AH, ropt);
|
|
|
|
restore_toc_entries_parallel(AH, pstate, &pending_list);
|
|
|
|
ParallelBackupEnd(AH, pstate);
|
|
|
|
|
|
|
|
/* reconnect the master and see if we missed something */
|
|
|
|
restore_toc_entries_postfork(AH, &pending_list);
|
|
|
|
Assert(AH->connection != NULL);
|
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
else
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
2009-02-02 21:07:37 +01:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
(void) restore_toc_entry(AH, te, ropt, false);
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2004-07-13 05:00:17 +02:00
|
|
|
/*
|
|
|
|
* Scan TOC again to output ownership commands and ACLs
|
|
|
|
*/
|
2005-04-15 18:40:36 +02:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->currentTE = te;
|
|
|
|
|
2010-02-18 02:29:10 +01:00
|
|
|
/* Both schema and data objects might now have ownership/ACLs */
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
2004-10-22 18:04:35 +02:00
|
|
|
ahlog(AH, 1, "setting owner and privileges for %s %s\n",
|
2004-08-13 23:37:28 +02:00
|
|
|
te->desc, te->tag);
|
2004-07-13 05:00:17 +02:00
|
|
|
_printTocEntry(AH, te, ropt, false, true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-02-13 22:30:19 +01:00
|
|
|
if (ropt->single_txn)
|
2006-02-15 00:30:43 +01:00
|
|
|
{
|
|
|
|
if (AH->connection)
|
|
|
|
CommitTransaction(AH);
|
|
|
|
else
|
|
|
|
ahprintf(AH, "COMMIT;\n\n");
|
|
|
|
}
|
2006-02-12 05:04:32 +01:00
|
|
|
|
2005-04-15 18:40:36 +02:00
|
|
|
if (AH->public.verbose)
|
|
|
|
dumpTimestamp(AH, "Completed on", time(NULL));
|
|
|
|
|
2005-03-18 18:32:55 +01:00
|
|
|
ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
2004-03-03 22:28:55 +01:00
|
|
|
* Clean up & we're done.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->stage = STAGE_FINALIZING;
|
|
|
|
|
2004-03-03 22:28:55 +01:00
|
|
|
if (ropt->filename || ropt->compression)
|
2011-01-22 23:56:42 +01:00
|
|
|
RestoreOutput(AH, sav);
|
2004-03-03 22:28:55 +01:00
|
|
|
|
|
|
|
if (ropt->useDB)
|
2012-02-16 17:49:20 +01:00
|
|
|
DisconnectDatabase(&AH->public);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* Restore a single TOC item. Used in both parallel and non-parallel restore;
|
|
|
|
* is_parallel is true if we are in a worker child process.
|
|
|
|
*
|
|
|
|
* Returns 0 normally, but WORKER_CREATE_DONE or WORKER_INHIBIT_DATA if
|
|
|
|
* the parallel parent has to make the corresponding status update.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
|
|
|
|
RestoreOptions *ropt, bool is_parallel)
|
|
|
|
{
|
2013-03-24 16:27:20 +01:00
|
|
|
int status = WORKER_OK;
|
2009-02-02 21:07:37 +01:00
|
|
|
teReqs reqs;
|
|
|
|
bool defnDumped;
|
|
|
|
|
|
|
|
AH->currentTE = te;
|
|
|
|
|
|
|
|
/* Work out what, if anything, we want from this entry */
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (_tocEntryIsACL(te))
|
|
|
|
reqs = 0; /* ACLs are never restored here */
|
|
|
|
else
|
|
|
|
reqs = te->reqs;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ignore DATABASE entry unless we should create it. We must check this
|
2012-06-10 21:20:04 +02:00
|
|
|
* here, not in _tocEntryRequired, because the createDB option should not
|
|
|
|
* affect emitting a DATABASE entry to an archive file.
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
*/
|
|
|
|
if (!ropt->createDB && strcmp(te->desc, "DATABASE") == 0)
|
|
|
|
reqs = 0;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* Dump any relevant dump warnings to stderr */
|
|
|
|
if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0)
|
|
|
|
{
|
|
|
|
if (!ropt->dataOnly && te->defn != NULL && strlen(te->defn) != 0)
|
|
|
|
write_msg(modulename, "warning from original dump file: %s\n", te->defn);
|
|
|
|
else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0)
|
|
|
|
write_msg(modulename, "warning from original dump file: %s\n", te->copyStmt);
|
|
|
|
}
|
|
|
|
|
|
|
|
defnDumped = false;
|
|
|
|
|
2009-06-11 16:49:15 +02:00
|
|
|
if ((reqs & REQ_SCHEMA) != 0) /* We want the schema */
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
ahlog(AH, 1, "creating %s %s\n", te->desc, te->tag);
|
|
|
|
|
|
|
|
_printTocEntry(AH, te, ropt, false, false);
|
|
|
|
defnDumped = true;
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "TABLE") == 0)
|
|
|
|
{
|
|
|
|
if (AH->lastErrorTE == te)
|
|
|
|
{
|
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* We failed to create the table. If
|
|
|
|
* --no-data-for-failed-tables was given, mark the
|
|
|
|
* corresponding TABLE DATA to be ignored.
|
2009-02-02 21:07:37 +01:00
|
|
|
*
|
2009-06-11 16:49:15 +02:00
|
|
|
* In the parallel case this must be done in the parent, so we
|
|
|
|
* just set the return value.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (ropt->noDataForFailedTables)
|
|
|
|
{
|
|
|
|
if (is_parallel)
|
2013-03-24 16:27:20 +01:00
|
|
|
status = WORKER_INHIBIT_DATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
else
|
|
|
|
inhibit_data_for_failed_table(AH, te);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* We created the table successfully. Mark the corresponding
|
|
|
|
* TABLE DATA for possible truncation.
|
2009-02-02 21:07:37 +01:00
|
|
|
*
|
2009-06-11 16:49:15 +02:00
|
|
|
* In the parallel case this must be done in the parent, so we
|
|
|
|
* just set the return value.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (is_parallel)
|
2013-03-24 16:27:20 +01:00
|
|
|
status = WORKER_CREATE_DONE;
|
2009-02-02 21:07:37 +01:00
|
|
|
else
|
|
|
|
mark_create_done(AH, te);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If we created a DB, connect to it... */
|
|
|
|
if (strcmp(te->desc, "DATABASE") == 0)
|
|
|
|
{
|
|
|
|
ahlog(AH, 1, "connecting to new database \"%s\"\n", te->tag);
|
|
|
|
_reconnectToDB(AH, te->tag);
|
2011-11-25 21:40:51 +01:00
|
|
|
ropt->dbname = pg_strdup(te->tag);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have a data component, then process it
|
|
|
|
*/
|
|
|
|
if ((reqs & REQ_DATA) != 0)
|
|
|
|
{
|
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* hadDumper will be set if there is genuine data component for this
|
|
|
|
* node. Otherwise, we need to check the defn field for statements
|
|
|
|
* that need to be executed in data-only restores.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (te->hadDumper)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If we can output the data, then restore it.
|
|
|
|
*/
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (AH->PrintTocDataPtr !=NULL)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
_printTocEntry(AH, te, ropt, true, false);
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
|
|
{
|
2012-09-04 15:00:04 +02:00
|
|
|
ahlog(AH, 1, "processing %s\n", te->desc);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
_selectOutputSchema(AH, "pg_catalog");
|
|
|
|
|
2014-06-13 02:14:32 +02:00
|
|
|
/* Send BLOB COMMENTS data to ExecuteSimpleCommands() */
|
|
|
|
if (strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
|
|
AH->outputKind = OUTPUT_OTHERDATA;
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
(*AH->PrintTocDataPtr) (AH, te, ropt);
|
2014-06-13 02:14:32 +02:00
|
|
|
|
|
|
|
AH->outputKind = OUTPUT_SQLCMDS;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
_disableTriggersIfNecessary(AH, te, ropt);
|
|
|
|
|
|
|
|
/* Select owner and schema as necessary */
|
|
|
|
_becomeOwner(AH, te);
|
|
|
|
_selectOutputSchema(AH, te->namespace);
|
|
|
|
|
2012-09-04 15:00:04 +02:00
|
|
|
ahlog(AH, 1, "processing data for table \"%s\"\n",
|
2009-02-02 21:07:37 +01:00
|
|
|
te->tag);
|
|
|
|
|
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* In parallel restore, if we created the table earlier in
|
|
|
|
* the run then we wrap the COPY in a transaction and
|
2014-05-06 18:12:18 +02:00
|
|
|
* precede it with a TRUNCATE. If archiving is not on
|
|
|
|
* this prevents WAL-logging the COPY. This obtains a
|
2009-06-11 16:49:15 +02:00
|
|
|
* speedup similar to that from using single_txn mode in
|
|
|
|
* non-parallel restores.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (is_parallel && te->created)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Parallel restore is always talking directly to a
|
|
|
|
* server, so no need to see if we should issue BEGIN.
|
|
|
|
*/
|
|
|
|
StartTransaction(AH);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the server version is >= 8.4, make sure we issue
|
|
|
|
* TRUNCATE with ONLY so that child tables are not
|
|
|
|
* wiped.
|
|
|
|
*/
|
|
|
|
ahprintf(AH, "TRUNCATE TABLE %s%s;\n\n",
|
|
|
|
(PQserverVersion(AH->connection) >= 80400 ?
|
|
|
|
"ONLY " : ""),
|
|
|
|
fmtId(te->tag));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-07-28 20:06:57 +02:00
|
|
|
* If we have a copy statement, use it.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (te->copyStmt && strlen(te->copyStmt) > 0)
|
|
|
|
{
|
|
|
|
ahprintf(AH, "%s", te->copyStmt);
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
AH->outputKind = OUTPUT_COPYDATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
else
|
|
|
|
AH->outputKind = OUTPUT_OTHERDATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
(*AH->PrintTocDataPtr) (AH, te, ropt);
|
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
/*
|
|
|
|
* Terminate COPY if needed.
|
|
|
|
*/
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
if (AH->outputKind == OUTPUT_COPYDATA &&
|
|
|
|
RestoringToDB(AH))
|
|
|
|
EndDBCopyMode(AH, te);
|
|
|
|
AH->outputKind = OUTPUT_SQLCMDS;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* close out the transaction started above */
|
|
|
|
if (is_parallel && te->created)
|
|
|
|
CommitTransaction(AH);
|
|
|
|
|
|
|
|
_enableTriggersIfNecessary(AH, te, ropt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (!defnDumped)
|
|
|
|
{
|
|
|
|
/* If we haven't already dumped the defn part, do so now */
|
|
|
|
ahlog(AH, 1, "executing %s %s\n", te->desc, te->tag);
|
|
|
|
_printTocEntry(AH, te, ropt, false, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
if (AH->public.n_errors > 0 && status == WORKER_OK)
|
|
|
|
status = WORKER_IGNORED_ERRORS;
|
|
|
|
|
|
|
|
return status;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Allocate a new RestoreOptions block.
|
|
|
|
* This is mainly so we can initialize it, but also for future expansion,
|
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
RestoreOptions *
|
|
|
|
NewRestoreOptions(void)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
RestoreOptions *opts;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
opts = (RestoreOptions *) pg_malloc0(sizeof(RestoreOptions));
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2009-01-05 17:54:37 +01:00
|
|
|
/* set any fields that shouldn't default to zeroes */
|
2000-07-04 16:25:28 +02:00
|
|
|
opts->format = archUnknown;
|
2009-02-26 17:02:39 +01:00
|
|
|
opts->promptPassword = TRI_DEFAULT;
|
2011-12-17 01:09:38 +01:00
|
|
|
opts->dumpSections = DUMP_UNSECTIONED;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
return opts;
|
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * Emit commands to disable all triggers on the table named by the TOC
 * entry, so that a data-only restore is not slowed down (or broken) by
 * foreign-key constraint triggers.  No-op unless both --data-only and
 * --disable-triggers were given.
 */
static void
_disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
{
	/* This hack is only needed in a data-only restore */
	if (!ropt->dataOnly || !ropt->disable_triggers)
		return;

	ahlog(AH, 1, "disabling triggers for %s\n", te->tag);

	/*
	 * Become superuser if possible, since they are the only ones who can
	 * disable constraint triggers.  If -S was not given, assume the initial
	 * user identity is a superuser.  (XXX would it be better to become the
	 * table owner?)
	 */
	_becomeUser(AH, ropt->superuser);

	/*
	 * Disable them.
	 */
	_selectOutputSchema(AH, te->namespace);

	/* te->tag is the bare table name; fmtId() quotes it if necessary */
	ahprintf(AH, "ALTER TABLE %s DISABLE TRIGGER ALL;\n\n",
			 fmtId(te->tag));
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * Re-enable all triggers on the table named by the TOC entry, undoing
 * what _disableTriggersIfNecessary did after the data has been loaded.
 * No-op unless both --data-only and --disable-triggers were given.
 */
static void
_enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
{
	/* This hack is only needed in a data-only restore */
	if (!ropt->dataOnly || !ropt->disable_triggers)
		return;

	ahlog(AH, 1, "enabling triggers for %s\n", te->tag);

	/*
	 * Become superuser if possible, since they are the only ones who can
	 * disable constraint triggers.  If -S was not given, assume the initial
	 * user identity is a superuser.  (XXX would it be better to become the
	 * table owner?)
	 */
	_becomeUser(AH, ropt->superuser);

	/*
	 * Enable them.
	 */
	_selectOutputSchema(AH, te->namespace);

	/* te->tag is the bare table name; fmtId() quotes it if necessary */
	ahprintf(AH, "ALTER TABLE %s ENABLE TRIGGER ALL;\n\n",
			 fmtId(te->tag));
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
/*
|
2000-07-21 13:40:08 +02:00
|
|
|
* This is a routine that is part of the dumper interface, hence the 'Archive*' parameter.
|
2000-07-04 16:25:28 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Public */
|
2014-05-06 02:27:16 +02:00
|
|
|
void
|
2002-08-20 19:54:45 +02:00
|
|
|
WriteData(Archive *AHX, const void *data, size_t dLen)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (!AH->currToc)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "internal error -- WriteData cannot be called outside the context of a DataDumper routine\n");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
(*AH->WriteDataPtr) (AH, data, dLen);
|
|
|
|
|
|
|
|
return;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create a new TOC entry. The TOC was designed as a TOC, but is now the
 * repository for all metadata. But the name has stuck.
 *
 * The entry is appended to the circular doubly-linked TOC list headed by
 * AH->toc, all string arguments are copied (namespace/tablespace/copyStmt
 * may be NULL), and the format handler is notified via ArchiveEntryPtr
 * if it registered one.
 */

/* Public */
void
ArchiveEntry(Archive *AHX,
			 CatalogId catalogId, DumpId dumpId,
			 const char *tag,
			 const char *namespace,
			 const char *tablespace,
			 const char *owner, bool withOids,
			 const char *desc, teSection section,
			 const char *defn,
			 const char *dropStmt, const char *copyStmt,
			 const DumpId *deps, int nDeps,
			 DataDumperPtr dumpFn, void *dumpArg)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	TocEntry   *newToc;

	newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));

	/* Track the count and the highest dump ID seen so far */
	AH->tocCount++;
	if (dumpId > AH->maxDumpId)
		AH->maxDumpId = dumpId;

	/* Link the new entry at the tail of the circular TOC list */
	newToc->prev = AH->toc->prev;
	newToc->next = AH->toc;
	AH->toc->prev->next = newToc;
	AH->toc->prev = newToc;

	newToc->catalogId = catalogId;
	newToc->dumpId = dumpId;
	newToc->section = section;

	/* Copy all string fields; NULL is preserved where the caller passed it */
	newToc->tag = pg_strdup(tag);
	newToc->namespace = namespace ? pg_strdup(namespace) : NULL;
	newToc->tablespace = tablespace ? pg_strdup(tablespace) : NULL;
	newToc->owner = pg_strdup(owner);
	newToc->withOids = withOids;
	newToc->desc = pg_strdup(desc);
	newToc->defn = pg_strdup(defn);
	newToc->dropStmt = pg_strdup(dropStmt);
	newToc->copyStmt = copyStmt ? pg_strdup(copyStmt) : NULL;

	/* Copy the dependency list, if any */
	if (nDeps > 0)
	{
		newToc->dependencies = (DumpId *) pg_malloc(nDeps * sizeof(DumpId));
		memcpy(newToc->dependencies, deps, nDeps * sizeof(DumpId));
		newToc->nDeps = nDeps;
	}
	else
	{
		newToc->dependencies = NULL;
		newToc->nDeps = 0;
	}

	/* Record the data-dumper callback; hadDumper marks entries with data */
	newToc->dataDumper = dumpFn;
	newToc->dataDumperArg = dumpArg;
	newToc->hadDumper = dumpFn ? true : false;

	/* Format-specific per-entry data is filled in by the format handler */
	newToc->formatData = NULL;

	/* Let the archive format react to the new entry, if it cares */
	if (AH->ArchiveEntryPtr !=NULL)
		(*AH->ArchiveEntryPtr) (AH, newToc);
}
|
|
|
|
|
|
|
|
/*
 * Print a human-readable summary of the archive's TOC (pg_restore -l).
 *
 * Writes a comment header describing the archive (creation time, format,
 * versions, sizes), then one line per TOC entry that would be restored
 * under the given options.  With --verbose, all entries are listed along
 * with their dependency IDs.  Output goes to ropt->filename if set,
 * otherwise to the current output target.
 */

/* Public */
void
PrintTOCSummary(Archive *AHX, RestoreOptions *ropt)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	TocEntry   *te;
	teSection	curSection;
	OutputContext sav;
	const char *fmtName;

	/* Redirect output to the requested file, remembering the old target */
	sav = SaveOutput(AH);
	if (ropt->filename)
		SetOutput(AH, ropt->filename, 0 /* no compression */ );

	ahprintf(AH, ";\n; Archive created at %s", ctime(&AH->createDate));
	ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n",
			 AH->archdbname, AH->tocCount, AH->compression);

	switch (AH->format)
	{
		case archCustom:
			fmtName = "CUSTOM";
			break;
		case archDirectory:
			fmtName = "DIRECTORY";
			break;
		case archTar:
			fmtName = "TAR";
			break;
		default:
			fmtName = "UNKNOWN";
	}

	ahprintf(AH, "; Dump Version: %d.%d-%d\n", AH->vmaj, AH->vmin, AH->vrev);
	ahprintf(AH, "; Format: %s\n", fmtName);
	ahprintf(AH, "; Integer: %d bytes\n", (int) AH->intSize);
	ahprintf(AH, "; Offset: %d bytes\n", (int) AH->offSize);
	if (AH->archiveRemoteVersion)
		ahprintf(AH, "; Dumped from database version: %s\n",
				 AH->archiveRemoteVersion);
	if (AH->archiveDumpVersion)
		ahprintf(AH, "; Dumped by pg_dump version: %s\n",
				 AH->archiveDumpVersion);

	ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");

	/*
	 * Walk the circular TOC list, tracking the current restore section so
	 * that _tocEntryRequired() can apply --section filtering.
	 */
	curSection = SECTION_PRE_DATA;
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		/* SECTION_NONE entries inherit the section of their neighbors */
		if (te->section != SECTION_NONE)
			curSection = te->section;
		if (ropt->verbose ||
			(_tocEntryRequired(te, curSection, ropt) & (REQ_SCHEMA | REQ_DATA)) != 0)
			ahprintf(AH, "%d; %u %u %s %s %s %s\n", te->dumpId,
					 te->catalogId.tableoid, te->catalogId.oid,
					 te->desc, te->namespace ? te->namespace : "-",
					 te->tag, te->owner);
		/* In verbose mode, also list the entry's dependency IDs */
		if (ropt->verbose && te->nDeps > 0)
		{
			int			i;

			ahprintf(AH, ";\tdepends on:");
			for (i = 0; i < te->nDeps; i++)
				ahprintf(AH, " %d", te->dependencies[i]);
			ahprintf(AH, "\n");
		}
	}

	/* Restore the previous output target if we redirected it */
	if (ropt->filename)
		RestoreOutput(AH, sav);
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/***********
|
|
|
|
* BLOB Archival
|
|
|
|
***********/
|
|
|
|
|
|
|
|
/* Called by a dumper to signal start of a BLOB */
|
2001-03-22 05:01:46 +01:00
|
|
|
int
|
2001-04-01 07:42:51 +02:00
|
|
|
StartBlob(Archive *AHX, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (!AH->StartBlobPtr)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "large-object output not supported in chosen format\n");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
(*AH->StartBlobPtr) (AH, AH->currToc, oid);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return 1;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Called by a dumper to signal end of a BLOB */
|
2001-03-22 05:01:46 +01:00
|
|
|
int
|
2001-04-01 07:42:51 +02:00
|
|
|
EndBlob(Archive *AHX, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->EndBlobPtr)
|
|
|
|
(*AH->EndBlobPtr) (AH, AH->currToc, oid);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return 1;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**********
|
|
|
|
* BLOB Restoration
|
|
|
|
**********/
|
|
|
|
|
2000-10-31 15:20:30 +01:00
|
|
|
/*
 * Called by a format handler before any blobs are restored
 *
 * Unless --single-transaction wraps the whole restore already, open a
 * transaction around the blob restoration (directly on the connection,
 * or as an emitted BEGIN in script output).  Also resets the blob count
 * used for the final progress message.
 */
void
StartRestoreBlobs(ArchiveHandle *AH)
{
	if (!AH->ropt->single_txn)
	{
		if (AH->connection)
			StartTransaction(AH);
		else
			ahprintf(AH, "BEGIN;\n\n");
	}

	AH->blobCount = 0;
}
|
|
|
|
|
|
|
|
/*
 * Called by a format handler after all blobs are restored
 *
 * Closes the transaction opened by StartRestoreBlobs (unless the whole
 * restore runs in a single transaction) and logs how many large objects
 * were restored.
 */
void
EndRestoreBlobs(ArchiveHandle *AH)
{
	if (!AH->ropt->single_txn)
	{
		if (AH->connection)
			CommitTransaction(AH);
		else
			ahprintf(AH, "COMMIT;\n\n");
	}

	/* ngettext picks singular/plural form based on blobCount */
	ahlog(AH, 1, ngettext("restored %d large object\n",
						  "restored %d large objects\n",
						  AH->blobCount),
		  AH->blobCount);
}
|
|
|
|
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
 * Called by a format handler to initiate restoration of a blob
 *
 * For pre-1.12 archives ("old blob style") the blob's pg_largeobject row
 * does not exist yet, so we must drop (if requested) and create it here;
 * newer archives carry explicit BLOB TOC entries that already did that.
 * In direct-to-database mode the LO is opened via libpq's lo_* calls;
 * in script mode the equivalent SELECT lo_open(...) is emitted instead.
 */
void
StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
{
	/* Archives older than format version 1.12 need create/drop done here */
	bool		old_blob_style = (AH->version < K_VERS_1_12);
	Oid			loOid;

	AH->blobCount++;

	/* Initialize the LO Buffer */
	AH->lo_buf_used = 0;

	ahlog(AH, 1, "restoring large object with OID %u\n", oid);

	/* With an old archive we must do drop and create logic here */
	if (old_blob_style && drop)
		DropBlobIfExists(AH, oid);

	if (AH->connection)
	{
		if (old_blob_style)
		{
			/* Create the LO and verify we got the OID the archive expects */
			loOid = lo_create(AH->connection, oid);
			if (loOid == 0 || loOid != oid)
				exit_horribly(modulename, "could not create large object %u: %s",
							  oid, PQerrorMessage(AH->connection));
		}
		AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
		if (AH->loFd == -1)
			exit_horribly(modulename, "could not open large object %u: %s",
						  oid, PQerrorMessage(AH->connection));
	}
	else
	{
		/* Script output: emit SQL that creates/opens the LO at restore time */
		if (old_blob_style)
			ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
					 oid, INV_WRITE);
		else
			ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
					 oid, INV_WRITE);
	}

	/* Subsequent WriteData calls now go to the large object */
	AH->writingBlob = 1;
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * Finish restoration of one blob: flush any buffered LO data, then close
 * the large object (via lo_close in direct-to-database mode, or by
 * emitting the equivalent SELECT in script mode).
 */
void
EndRestoreBlob(ArchiveHandle *AH, Oid oid)
{
	if (AH->lo_buf_used > 0)
	{
		/* Write remaining bytes from the LO buffer */
		dump_lo_buf(AH);
	}

	/* WriteData reverts to normal (non-blob) output */
	AH->writingBlob = 0;

	if (AH->connection)
	{
		lo_close(AH->connection, AH->loFd);
		AH->loFd = -1;
	}
	else
	{
		/* lo_close(0) closes the descriptor opened by the emitted lo_open */
		ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n");
	}
}
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/***********
|
|
|
|
* Sorting and Reordering
|
|
|
|
***********/
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * Reorder and filter the TOC according to a pg_restore -L list file.
 *
 * Each non-comment line of ropt->tocFile is expected to begin with a dump
 * ID.  Listed entries are marked wanted in ropt->idWanted and moved to the
 * end of the TOC list in the order given; unlisted entries collect at the
 * front (deliberately -- see comment below about parallel restore).
 */
void
SortTocFromFile(Archive *AHX, RestoreOptions *ropt)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	FILE	   *fh;
	char		buf[100];
	bool		incomplete_line;

	/* Allocate space for the 'wanted' array, and init it */
	ropt->idWanted = (bool *) pg_malloc(sizeof(bool) * AH->maxDumpId);
	memset(ropt->idWanted, 0, sizeof(bool) * AH->maxDumpId);

	/* Setup the file */
	fh = fopen(ropt->tocFile, PG_BINARY_R);
	if (!fh)
		exit_horribly(modulename, "could not open TOC file \"%s\": %s\n",
					  ropt->tocFile, strerror(errno));

	incomplete_line = false;
	while (fgets(buf, sizeof(buf), fh) != NULL)
	{
		bool		prev_incomplete_line = incomplete_line;
		int			buflen;
		char	   *cmnt;
		char	   *endptr;
		DumpId		id;
		TocEntry   *te;

		/*
		 * Some lines in the file might be longer than sizeof(buf).  This is
		 * no problem, since we only care about the leading numeric ID which
		 * can be at most a few characters; but we have to skip continuation
		 * bufferloads when processing a long line.
		 */
		buflen = strlen(buf);
		if (buflen > 0 && buf[buflen - 1] == '\n')
			incomplete_line = false;
		else
			incomplete_line = true;
		if (prev_incomplete_line)
			continue;

		/* Truncate line at comment, if any */
		cmnt = strchr(buf, ';');
		if (cmnt != NULL)
			cmnt[0] = '\0';

		/* Ignore if all blank */
		if (strspn(buf, " \t\r\n") == strlen(buf))
			continue;

		/* Get an ID, check it's valid and not already seen */
		id = strtol(buf, &endptr, 10);
		if (endptr == buf || id <= 0 || id > AH->maxDumpId ||
			ropt->idWanted[id - 1])
		{
			write_msg(modulename, "WARNING: line ignored: %s\n", buf);
			continue;
		}

		/* Find TOC entry */
		te = getTocEntryByDumpId(AH, id);
		if (!te)
			exit_horribly(modulename, "could not find entry for ID %d\n",
						  id);

		/* Mark it wanted */
		ropt->idWanted[id - 1] = true;

		/*
		 * Move each item to the end of the list as it is selected, so that
		 * they are placed in the desired order.  Any unwanted items will end
		 * up at the front of the list, which may seem unintuitive but it's
		 * what we need.  In an ordinary serial restore that makes no
		 * difference, but in a parallel restore we need to mark unrestored
		 * items' dependencies as satisfied before we start examining
		 * restorable items.  Otherwise they could have surprising
		 * side-effects on the order in which restorable items actually get
		 * restored.
		 */
		_moveBefore(AH, AH->toc, te);
	}

	if (fclose(fh) != 0)
		exit_horribly(modulename, "could not close TOC file: %s\n",
					  strerror(errno));
}
|
|
|
|
|
|
|
|
/**********************
|
|
|
|
* 'Convenience functions that look like standard IO functions
|
|
|
|
* for writing data when in dump mode.
|
|
|
|
**********************/
|
|
|
|
|
|
|
|
/* Public */
|
2014-05-06 02:27:16 +02:00
|
|
|
void
|
2001-03-22 05:01:46 +01:00
|
|
|
archputs(const char *s, Archive *AH)
|
|
|
|
{
|
2014-05-06 02:27:16 +02:00
|
|
|
WriteData(AH, s, strlen(s));
|
|
|
|
return;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
/*
 * printf-like convenience wrapper: format into a temporary buffer and
 * write the result as archive data.  Returns the number of bytes written.
 *
 * pvsnprintf returns the space needed when the buffer is too small, so
 * the loop retries once with the exact required size.
 */
int
archprintf(Archive *AH, const char *fmt,...)
{
	char	   *p;
	size_t		len = 128;		/* initial assumption about buffer size */
	size_t		cnt;

	for (;;)
	{
		va_list		args;

		/* Allocate work buffer. */
		p = (char *) pg_malloc(len);

		/* Try to format the data. */
		va_start(args, fmt);
		cnt = pvsnprintf(p, len, fmt, args);
		va_end(args);

		if (cnt < len)
			break;				/* success */

		/* Release buffer and loop around to try again with larger len. */
		free(p);
		len = cnt;
	}

	WriteData(AH, p, cnt);
	free(p);
	return (int) cnt;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*******************************
|
|
|
|
* Stuff below here should be 'private' to the archiver routines
|
|
|
|
*******************************/
|
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
/*
 * Redirect the archiver's text output (AH->OF) to the given file, or to
 * the archive's own file/stdout when filename is NULL.
 *
 * When compression is requested (and zlib is available) the target is
 * opened with gzopen/gzdopen and AH->gzOut is set so ahwrite knows to use
 * the gz* API.  File descriptors are dup()'d before fdopen/gzdopen so the
 * original stream is not disturbed.  Exits via exit_horribly on failure.
 */
static void
SetOutput(ArchiveHandle *AH, const char *filename, int compression)
{
	int			fn;

	/* Decide between an explicit filename and an already-open descriptor */
	if (filename)
		fn = -1;
	else if (AH->FH)
		fn = fileno(AH->FH);
	else if (AH->fSpec)
	{
		fn = -1;
		filename = AH->fSpec;
	}
	else
		fn = fileno(stdout);

	/* If compression explicitly requested, use gzopen */
#ifdef HAVE_LIBZ
	if (compression != 0)
	{
		char		fmode[10];

		/* Don't use PG_BINARY_x since this is zlib */
		sprintf(fmode, "wb%d", compression);
		if (fn >= 0)
			AH->OF = gzdopen(dup(fn), fmode);
		else
			AH->OF = gzopen(filename, fmode);
		AH->gzOut = 1;
	}
	else
#endif
	{							/* Use fopen */
		if (AH->mode == archModeAppend)
		{
			if (fn >= 0)
				AH->OF = fdopen(dup(fn), PG_BINARY_A);
			else
				AH->OF = fopen(filename, PG_BINARY_A);
		}
		else
		{
			if (fn >= 0)
				AH->OF = fdopen(dup(fn), PG_BINARY_W);
			else
				AH->OF = fopen(filename, PG_BINARY_W);
		}
		AH->gzOut = 0;
	}

	if (!AH->OF)
	{
		if (filename)
			exit_horribly(modulename, "could not open output file \"%s\": %s\n",
						  filename, strerror(errno));
		else
			exit_horribly(modulename, "could not open output file: %s\n",
						  strerror(errno));
	}
}
|
|
|
|
|
|
|
|
static OutputContext
|
|
|
|
SaveOutput(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
OutputContext sav;
|
|
|
|
|
|
|
|
sav.OF = AH->OF;
|
|
|
|
sav.gzOut = AH->gzOut;
|
2001-01-12 05:32:07 +01:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return sav;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2006-07-18 19:42:01 +02:00
|
|
|
static void
|
2011-01-22 23:56:42 +01:00
|
|
|
RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
int res;
|
2001-01-12 05:32:07 +01:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->gzOut)
|
2001-01-12 05:32:07 +01:00
|
|
|
res = GZCLOSE(AH->OF);
|
2001-03-22 05:01:46 +01:00
|
|
|
else
|
2001-01-12 05:32:07 +01:00
|
|
|
res = fclose(AH->OF);
|
|
|
|
|
|
|
|
if (res != 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "could not close output file: %s\n",
|
2012-06-10 21:20:04 +02:00
|
|
|
strerror(errno));
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
AH->gzOut = savedContext.gzOut;
|
|
|
|
AH->OF = savedContext.OF;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2001-03-22 05:01:46 +01:00
|
|
|
* Print formatted text to the output file (usually stdout).
|
2000-07-04 16:25:28 +02:00
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
int
|
|
|
|
ahprintf(ArchiveHandle *AH, const char *fmt,...)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2013-10-25 03:43:57 +02:00
|
|
|
char *p;
|
|
|
|
size_t len = 128; /* initial assumption about buffer size */
|
|
|
|
size_t cnt;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2013-10-25 03:43:57 +02:00
|
|
|
for (;;)
|
2001-02-23 23:52:32 +01:00
|
|
|
{
|
2013-10-25 03:43:57 +02:00
|
|
|
va_list args;
|
|
|
|
|
|
|
|
/* Allocate work buffer. */
|
|
|
|
p = (char *) pg_malloc(len);
|
|
|
|
|
|
|
|
/* Try to format the data. */
|
|
|
|
va_start(args, fmt);
|
|
|
|
cnt = pvsnprintf(p, len, fmt, args);
|
|
|
|
va_end(args);
|
|
|
|
|
|
|
|
if (cnt < len)
|
|
|
|
break; /* success */
|
|
|
|
|
|
|
|
/* Release buffer and loop around to try again with larger len. */
|
|
|
|
free(p);
|
|
|
|
len = cnt;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2013-10-25 03:43:57 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
ahwrite(p, 1, cnt, AH);
|
|
|
|
free(p);
|
2013-10-25 03:43:57 +02:00
|
|
|
return (int) cnt;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
|
|
|
ahlog(ArchiveHandle *AH, int level, const char *fmt,...)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
if (AH->debugLevel < level && (!AH->public.verbose || level > 1))
|
|
|
|
return;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
2011-11-30 02:41:06 +01:00
|
|
|
vwrite_msg(NULL, fmt, ap);
|
2000-07-21 13:40:08 +02:00
|
|
|
va_end(ap);
|
|
|
|
}
|
|
|
|
|
2000-07-24 08:24:26 +02:00
|
|
|
/*
|
|
|
|
* Single place for logic which says 'We are restoring to a direct DB connection'.
|
|
|
|
*/
|
2006-07-18 19:42:01 +02:00
|
|
|
static int
|
2001-03-22 05:01:46 +01:00
|
|
|
RestoringToDB(ArchiveHandle *AH)
|
2000-07-24 08:24:26 +02:00
|
|
|
{
|
|
|
|
return (AH->ropt && AH->ropt->useDB && AH->connection);
|
|
|
|
}
|
|
|
|
|
2005-06-21 22:45:44 +02:00
|
|
|
/*
|
|
|
|
* Dump the current contents of the LO data buffer while writing a BLOB
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
dump_lo_buf(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
if (AH->connection)
|
|
|
|
{
|
|
|
|
size_t res;
|
|
|
|
|
|
|
|
res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
|
2009-03-26 23:26:08 +01:00
|
|
|
ahlog(AH, 5, ngettext("wrote %lu byte of large object data (result = %lu)\n",
|
2009-06-11 16:49:15 +02:00
|
|
|
"wrote %lu bytes of large object data (result = %lu)\n",
|
2009-03-26 23:26:08 +01:00
|
|
|
AH->lo_buf_used),
|
2005-06-21 22:45:44 +02:00
|
|
|
(unsigned long) AH->lo_buf_used, (unsigned long) res);
|
|
|
|
if (res != AH->lo_buf_used)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename,
|
2005-10-15 04:49:52 +02:00
|
|
|
"could not write to large object (result: %lu, expected: %lu)\n",
|
|
|
|
(unsigned long) res, (unsigned long) AH->lo_buf_used);
|
2005-06-21 22:45:44 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2009-08-04 23:56:09 +02:00
|
|
|
PQExpBuffer buf = createPQExpBuffer();
|
2005-06-21 22:45:44 +02:00
|
|
|
|
2009-08-04 23:56:09 +02:00
|
|
|
appendByteaLiteralAHX(buf,
|
|
|
|
(const unsigned char *) AH->lo_buf,
|
|
|
|
AH->lo_buf_used,
|
|
|
|
AH);
|
2005-06-21 22:45:44 +02:00
|
|
|
|
|
|
|
/* Hack: turn off writingBlob so ahwrite doesn't recurse to here */
|
|
|
|
AH->writingBlob = 0;
|
2009-08-04 23:56:09 +02:00
|
|
|
ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
|
2005-06-21 22:45:44 +02:00
|
|
|
AH->writingBlob = 1;
|
|
|
|
|
2009-08-04 23:56:09 +02:00
|
|
|
destroyPQExpBuffer(buf);
|
2005-06-21 22:45:44 +02:00
|
|
|
}
|
|
|
|
AH->lo_buf_used = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/*
|
2011-07-28 20:06:57 +02:00
|
|
|
* Write buffer to the output file (usually stdout). This is used for
|
2001-03-22 05:01:46 +01:00
|
|
|
* outputting 'restore' scripts etc. It is even possible for an archive
|
|
|
|
* format to create a custom output routine to 'fake' a restore if it
|
2000-07-21 13:40:08 +02:00
|
|
|
* wants to generate a script (see TAR output).
|
2000-07-04 16:25:28 +02:00
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
void
|
2001-03-22 05:01:46 +01:00
|
|
|
ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2014-05-06 18:12:18 +02:00
|
|
|
int bytes_written = 0;
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->writingBlob)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2005-10-15 04:49:52 +02:00
|
|
|
size_t remaining = size * nmemb;
|
2005-06-21 22:45:44 +02:00
|
|
|
|
|
|
|
while (AH->lo_buf_used + remaining > AH->lo_buf_size)
|
2002-08-20 19:54:45 +02:00
|
|
|
{
|
2005-06-21 22:45:44 +02:00
|
|
|
size_t avail = AH->lo_buf_size - AH->lo_buf_used;
|
|
|
|
|
|
|
|
memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
|
|
|
|
ptr = (const void *) ((const char *) ptr + avail);
|
|
|
|
remaining -= avail;
|
|
|
|
AH->lo_buf_used += avail;
|
|
|
|
dump_lo_buf(AH);
|
2002-08-20 19:54:45 +02:00
|
|
|
}
|
|
|
|
|
2005-06-21 22:45:44 +02:00
|
|
|
memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
|
|
|
|
AH->lo_buf_used += remaining;
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
bytes_written = size * nmemb;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
else if (AH->gzOut)
|
2014-05-06 02:27:16 +02:00
|
|
|
bytes_written = GZWRITE(ptr, size, nmemb, AH->OF);
|
2001-03-22 05:01:46 +01:00
|
|
|
else if (AH->CustomOutPtr)
|
2014-05-06 02:27:16 +02:00
|
|
|
bytes_written = AH->CustomOutPtr (AH, ptr, size * nmemb);
|
2014-05-06 18:12:18 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2001-03-22 05:01:46 +01:00
|
|
|
* If we're doing a restore, and it's direct to DB, and we're
|
|
|
|
* connected then send it to the DB.
|
|
|
|
*/
|
2000-07-24 08:24:26 +02:00
|
|
|
if (RestoringToDB(AH))
|
2014-05-06 18:12:18 +02:00
|
|
|
bytes_written = ExecuteSqlCommandBuf(AH, (const char *) ptr, size * nmemb);
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
2014-05-06 02:27:16 +02:00
|
|
|
bytes_written = fwrite(ptr, size, nmemb, AH->OF) * size;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
2014-05-06 02:27:16 +02:00
|
|
|
|
|
|
|
if (bytes_written != size * nmemb)
|
|
|
|
WRITE_ERROR_EXIT;
|
|
|
|
|
|
|
|
return;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2004-04-22 04:39:10 +02:00
|
|
|
/* on some error, we may decide to go on... */
|
|
|
|
void
|
2012-03-20 22:38:11 +01:00
|
|
|
warn_or_exit_horribly(ArchiveHandle *AH,
|
2012-06-10 21:20:04 +02:00
|
|
|
const char *modulename, const char *fmt,...)
|
2004-04-22 04:39:10 +02:00
|
|
|
{
|
2004-08-29 07:07:03 +02:00
|
|
|
va_list ap;
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
|
2004-08-29 07:07:03 +02:00
|
|
|
switch (AH->stage)
|
|
|
|
{
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
|
|
|
|
case STAGE_NONE:
|
|
|
|
/* Do nothing special */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case STAGE_INITIALIZING:
|
2004-08-29 07:07:03 +02:00
|
|
|
if (AH->stage != AH->lastErrorStage)
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
write_msg(modulename, "Error while INITIALIZING:\n");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case STAGE_PROCESSING:
|
2004-08-29 07:07:03 +02:00
|
|
|
if (AH->stage != AH->lastErrorStage)
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
write_msg(modulename, "Error while PROCESSING TOC:\n");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case STAGE_FINALIZING:
|
2004-08-29 07:07:03 +02:00
|
|
|
if (AH->stage != AH->lastErrorStage)
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
write_msg(modulename, "Error while FINALIZING:\n");
|
|
|
|
break;
|
|
|
|
}
|
2004-08-29 07:07:03 +02:00
|
|
|
if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
|
|
|
|
{
|
2005-04-15 18:40:36 +02:00
|
|
|
write_msg(modulename, "Error from TOC entry %d; %u %u %s %s %s\n",
|
|
|
|
AH->currentTE->dumpId,
|
2005-10-15 04:49:52 +02:00
|
|
|
AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
|
|
|
|
AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
}
|
|
|
|
AH->lastErrorStage = AH->stage;
|
|
|
|
AH->lastErrorTE = AH->currentTE;
|
|
|
|
|
2004-04-22 04:39:10 +02:00
|
|
|
va_start(ap, fmt);
|
2012-03-20 22:38:11 +01:00
|
|
|
vwrite_msg(modulename, fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
2004-08-20 06:20:23 +02:00
|
|
|
if (AH->public.exit_on_error)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_nicely(1);
|
2004-04-22 04:39:10 +02:00
|
|
|
else
|
|
|
|
AH->public.n_errors++;
|
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2010-08-21 15:59:44 +02:00
|
|
|
#ifdef NOT_USED
/*
 * Relocate TOC entry "te" so that it immediately follows "pos" in the
 * doubly-linked TOC list.  (Currently compiled out.)
 */
static void
_moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
{
	/* Detach te from its current position */
	te->prev->next = te->next;
	te->next->prev = te->prev;

	/* Splice it in right after "pos" */
	te->prev = pos;
	te->next = pos->next;
	pos->next->prev = te;
	pos->next = te;
}
#endif
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
static void
|
|
|
|
_moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2010-08-21 15:59:44 +02:00
|
|
|
/* Unlink te from list */
|
2001-03-22 05:01:46 +01:00
|
|
|
te->prev->next = te->next;
|
|
|
|
te->next->prev = te->prev;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2010-08-21 15:59:44 +02:00
|
|
|
/* and insert it before "pos" */
|
2001-03-22 05:01:46 +01:00
|
|
|
te->prev = pos->prev;
|
|
|
|
te->next = pos;
|
|
|
|
pos->prev->next = te;
|
|
|
|
pos->prev = te;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
/*
|
|
|
|
* Build index arrays for the TOC list
|
|
|
|
*
|
|
|
|
* This should be invoked only after we have created or read in all the TOC
|
|
|
|
* items.
|
|
|
|
*
|
|
|
|
* The arrays are indexed by dump ID (so entry zero is unused). Note that the
|
2014-05-06 18:12:18 +02:00
|
|
|
* array entries run only up to maxDumpId. We might see dependency dump IDs
|
2012-05-29 02:38:28 +02:00
|
|
|
* beyond that (if the dump was partial); so always check the array bound
|
|
|
|
* before trying to touch an array entry.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
buildTocEntryArrays(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
DumpId maxDumpId = AH->maxDumpId;
|
2001-03-22 05:01:46 +01:00
|
|
|
TocEntry *te;
|
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
AH->tocsByDumpId = (TocEntry **) pg_malloc0((maxDumpId + 1) * sizeof(TocEntry *));
|
|
|
|
AH->tableDataId = (DumpId *) pg_malloc0((maxDumpId + 1) * sizeof(DumpId));
|
2012-05-29 02:38:28 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
/* this check is purely paranoia, maxDumpId should be correct */
|
|
|
|
if (te->dumpId <= 0 || te->dumpId > maxDumpId)
|
2012-06-18 22:57:00 +02:00
|
|
|
exit_horribly(modulename, "bad dumpId\n");
|
2012-05-29 02:38:28 +02:00
|
|
|
|
|
|
|
/* tocsByDumpId indexes all TOCs by their dump ID */
|
|
|
|
AH->tocsByDumpId[te->dumpId] = te;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* tableDataId provides the TABLE DATA item's dump ID for each TABLE
|
2014-05-06 18:12:18 +02:00
|
|
|
* TOC entry that has a DATA item. We compute this by reversing the
|
2012-05-29 02:38:28 +02:00
|
|
|
* TABLE DATA item's dependency, knowing that a TABLE DATA item has
|
|
|
|
* just one dependency and it is the TABLE item.
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0)
|
|
|
|
{
|
|
|
|
DumpId tableId = te->dependencies[0];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The TABLE item might not have been in the archive, if this was
|
|
|
|
* a data-only dump; but its dump ID should be less than its data
|
|
|
|
* item's dump ID, so there should be a place for it in the array.
|
|
|
|
*/
|
|
|
|
if (tableId <= 0 || tableId > maxDumpId)
|
2012-06-18 22:57:00 +02:00
|
|
|
exit_horribly(modulename, "bad table dumpId for TABLE DATA item\n");
|
2012-05-29 02:38:28 +02:00
|
|
|
|
|
|
|
AH->tableDataId[tableId] = te->dumpId;
|
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2012-05-29 02:38:28 +02:00
|
|
|
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
TocEntry *
|
2012-05-29 02:38:28 +02:00
|
|
|
getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
|
|
|
|
{
|
|
|
|
/* build index arrays if we didn't already */
|
|
|
|
if (AH->tocsByDumpId == NULL)
|
|
|
|
buildTocEntryArrays(AH);
|
|
|
|
|
|
|
|
if (id > 0 && id <= AH->maxDumpId)
|
|
|
|
return AH->tocsByDumpId[id];
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return NULL;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2005-01-25 23:44:31 +01:00
|
|
|
teReqs
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
TocIDRequired(ArchiveHandle *AH, DumpId id)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2003-12-06 04:00:16 +01:00
|
|
|
TocEntry *te = getTocEntryByDumpId(AH, id);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (!te)
|
|
|
|
return 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
return te->reqs;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
size_t
|
2007-02-19 16:05:06 +01:00
|
|
|
WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet)
|
2002-10-22 21:15:23 +02:00
|
|
|
{
|
2003-08-04 02:43:34 +02:00
|
|
|
int off;
|
2002-10-22 21:15:23 +02:00
|
|
|
|
|
|
|
/* Save the flag */
|
|
|
|
(*AH->WriteBytePtr) (AH, wasSet);
|
|
|
|
|
2007-02-19 16:05:06 +01:00
|
|
|
/* Write out pgoff_t smallest byte first, prevents endian mismatch */
|
|
|
|
for (off = 0; off < sizeof(pgoff_t); off++)
|
2002-10-22 21:15:23 +02:00
|
|
|
{
|
2003-08-04 02:43:34 +02:00
|
|
|
(*AH->WriteBytePtr) (AH, o & 0xFF);
|
2002-10-22 21:15:23 +02:00
|
|
|
o >>= 8;
|
|
|
|
}
|
2007-02-19 16:05:06 +01:00
|
|
|
return sizeof(pgoff_t) + 1;
|
2002-10-22 21:15:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-11-15 22:14:46 +01:00
|
|
|
ReadOffset(ArchiveHandle *AH, pgoff_t * o)
|
2002-10-22 21:15:23 +02:00
|
|
|
{
|
2003-08-04 02:43:34 +02:00
|
|
|
int i;
|
|
|
|
int off;
|
|
|
|
int offsetFlg;
|
2002-10-22 21:15:23 +02:00
|
|
|
|
|
|
|
/* Initialize to zero */
|
|
|
|
*o = 0;
|
|
|
|
|
|
|
|
/* Check for old version */
|
|
|
|
if (AH->version < K_VERS_1_7)
|
|
|
|
{
|
|
|
|
/* Prior versions wrote offsets using WriteInt */
|
|
|
|
i = ReadInt(AH);
|
|
|
|
/* -1 means not set */
|
|
|
|
if (i < 0)
|
2003-08-04 02:43:34 +02:00
|
|
|
return K_OFFSET_POS_NOT_SET;
|
2002-10-22 21:15:23 +02:00
|
|
|
else if (i == 0)
|
2003-08-04 02:43:34 +02:00
|
|
|
return K_OFFSET_NO_DATA;
|
2002-10-22 21:15:23 +02:00
|
|
|
|
2007-02-19 16:05:06 +01:00
|
|
|
/* Cast to pgoff_t because it was written as an int. */
|
|
|
|
*o = (pgoff_t) i;
|
2002-10-22 21:15:23 +02:00
|
|
|
return K_OFFSET_POS_SET;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Read the flag indicating the state of the data pointer. Check if valid
|
|
|
|
* and die if not.
|
2002-10-22 21:15:23 +02:00
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* This used to be handled by a negative or zero pointer, now we use an
|
|
|
|
* extra byte specifically for the state.
|
2002-10-22 21:15:23 +02:00
|
|
|
*/
|
|
|
|
offsetFlg = (*AH->ReadBytePtr) (AH) & 0xFF;
|
|
|
|
|
|
|
|
switch (offsetFlg)
|
|
|
|
{
|
|
|
|
case K_OFFSET_POS_NOT_SET:
|
|
|
|
case K_OFFSET_NO_DATA:
|
|
|
|
case K_OFFSET_POS_SET:
|
|
|
|
|
2003-08-04 02:43:34 +02:00
|
|
|
break;
|
2002-10-22 21:15:23 +02:00
|
|
|
|
|
|
|
default:
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "unexpected data offset flag %d\n", offsetFlg);
|
2002-10-22 21:15:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read the bytes
|
|
|
|
*/
|
|
|
|
for (off = 0; off < AH->offSize; off++)
|
|
|
|
{
|
2007-02-19 16:05:06 +01:00
|
|
|
if (off < sizeof(pgoff_t))
|
|
|
|
*o |= ((pgoff_t) ((*AH->ReadBytePtr) (AH))) << (off * 8);
|
2002-10-22 21:15:23 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
if ((*AH->ReadBytePtr) (AH) != 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "file offset in dump file is too large\n");
|
2002-10-22 21:15:23 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return offsetFlg;
|
|
|
|
}
|
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t
|
2001-03-22 05:01:46 +01:00
|
|
|
WriteInt(ArchiveHandle *AH, int i)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
int b;
|
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* This is a bit yucky, but I don't want to make the binary format very
|
|
|
|
* dependent on representation, and not knowing much about it, I write out
|
|
|
|
* a sign byte. If you change this, don't forget to change the file
|
|
|
|
* version #, and modify readInt to read the new format AS WELL AS the old
|
|
|
|
* formats.
|
2001-03-22 05:01:46 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* SIGN byte */
|
|
|
|
if (i < 0)
|
|
|
|
{
|
|
|
|
(*AH->WriteBytePtr) (AH, 1);
|
2000-07-21 13:40:08 +02:00
|
|
|
i = -i;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
(*AH->WriteBytePtr) (AH, 0);
|
|
|
|
|
|
|
|
for (b = 0; b < AH->intSize; b++)
|
|
|
|
{
|
|
|
|
(*AH->WriteBytePtr) (AH, i & 0xFF);
|
2002-05-29 03:38:56 +02:00
|
|
|
i >>= 8;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return AH->intSize + 1;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
int
|
|
|
|
ReadInt(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
int res = 0;
|
|
|
|
int bv,
|
|
|
|
b;
|
|
|
|
int sign = 0; /* Default positive */
|
|
|
|
int bitShift = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->version > K_VERS_1_0)
|
2000-07-21 13:40:08 +02:00
|
|
|
/* Read a sign byte */
|
2001-03-22 05:01:46 +01:00
|
|
|
sign = (*AH->ReadBytePtr) (AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
for (b = 0; b < AH->intSize; b++)
|
|
|
|
{
|
|
|
|
bv = (*AH->ReadBytePtr) (AH) & 0xFF;
|
2000-07-21 13:40:08 +02:00
|
|
|
if (bv != 0)
|
|
|
|
res = res + (bv << bitShift);
|
|
|
|
bitShift += 8;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (sign)
|
|
|
|
res = -res;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return res;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t
|
2001-04-01 07:42:51 +02:00
|
|
|
WriteStr(ArchiveHandle *AH, const char *c)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t res;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
if (c)
|
|
|
|
{
|
2014-05-06 18:12:18 +02:00
|
|
|
int len = strlen(c);
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
res = WriteInt(AH, len);
|
|
|
|
(*AH->WriteBufPtr) (AH, c, len);
|
|
|
|
res += len;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
res = WriteInt(AH, -1);
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return res;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
char *
|
|
|
|
ReadStr(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
char *buf;
|
|
|
|
int l;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
l = ReadInt(AH);
|
2007-08-06 03:38:15 +02:00
|
|
|
if (l < 0)
|
2000-07-21 13:40:08 +02:00
|
|
|
buf = NULL;
|
|
|
|
else
|
|
|
|
{
|
2011-11-25 21:40:51 +01:00
|
|
|
buf = (char *) pg_malloc(l + 1);
|
2014-05-06 02:27:16 +02:00
|
|
|
(*AH->ReadBufPtr) (AH, (void *) buf, l);
|
2007-08-06 03:38:15 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
buf[l] = '\0';
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
return buf;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2000-12-07 03:52:27 +01:00
|
|
|
/*
 * _discoverArchiveFormat
 *
 * Sniff the archive named by AH->fSpec (or stdin if not set) to decide
 * which archive format it is, without being told.
 *
 * Recognized cases:
 *	 - a directory containing "toc.dat" (or "toc.dat.gz" with zlib)
 *	   => archDirectory
 *	 - a file starting with the "PGDMP" magic => custom format; the rest
 *	   of the header (version, intSize, offSize, format byte) is parsed
 *	   here and must stay in sync with ReadHead()
 *	 - a 512-byte block that passes isValidTarHeader() => archTar
 *	 - a plain-text SQL dump => fatal error suggesting psql
 *
 * Everything read is also stashed in AH->lookahead so that, on a
 * non-seekable input (e.g. a pipe), the format module can still consume
 * the header bytes; AH->readHeader is set in that case.
 *
 * Returns the detected ArchiveFormat (also stored in AH->format).
 * Exits via exit_horribly() on any unrecognizable or unreadable input.
 */
static int
_discoverArchiveFormat(ArchiveHandle *AH)
{
	FILE	   *fh;
	char		sig[6];			/* More than enough */
	size_t		cnt;
	int			wantClose = 0;	/* set if we fopen'd (vs. using stdin) */

#if 0
	write_msg(modulename, "attempting to ascertain archive format\n");
#endif

	/* Reset any previous lookahead buffer */
	if (AH->lookahead)
		free(AH->lookahead);

	AH->lookaheadSize = 512;
	AH->lookahead = pg_malloc0(512);
	AH->lookaheadLen = 0;
	AH->lookaheadPos = 0;

	if (AH->fSpec)
	{
		struct stat st;

		wantClose = 1;

		/*
		 * Check if the specified archive is a directory. If so, check if
		 * there's a "toc.dat" (or "toc.dat.gz") file in it.
		 */
		if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
		{
			char		buf[MAXPGPATH];

			if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
				exit_horribly(modulename, "directory name too long: \"%s\"\n",
							  AH->fSpec);
			if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
			{
				AH->format = archDirectory;
				return AH->format;
			}

#ifdef HAVE_LIBZ
			/* with zlib, the TOC may have been compressed */
			if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
				exit_horribly(modulename, "directory name too long: \"%s\"\n",
							  AH->fSpec);
			if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
			{
				AH->format = archDirectory;
				return AH->format;
			}
#endif
			exit_horribly(modulename, "directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)\n",
						  AH->fSpec);
			fh = NULL;			/* keep compiler quiet */
		}
		else
		{
			fh = fopen(AH->fSpec, PG_BINARY_R);
			if (!fh)
				exit_horribly(modulename, "could not open input file \"%s\": %s\n",
							  AH->fSpec, strerror(errno));
		}
	}
	else
	{
		fh = stdin;
		if (!fh)
			exit_horribly(modulename, "could not open input file: %s\n",
						  strerror(errno));
	}

	/* Read the 5-byte magic; distinguish fread error from short file */
	if ((cnt = fread(sig, 1, 5, fh)) != 5)
	{
		if (ferror(fh))
			exit_horribly(modulename, "could not read input file: %s\n", strerror(errno));
		else
			exit_horribly(modulename, "input file is too short (read %lu, expected 5)\n",
						  (unsigned long) cnt);
	}

	/* Save it, just in case we need it later */
	strncpy(&AH->lookahead[0], sig, 5);
	AH->lookaheadLen = 5;

	if (strncmp(sig, "PGDMP", 5) == 0)
	{
		int			byteread;

		/*
		 * Finish reading (most of) a custom-format header.
		 *
		 * NB: this code must agree with ReadHead().
		 */
		if ((byteread = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);

		AH->vmaj = byteread;

		if ((byteread = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);

		AH->vmin = byteread;

		/* Save these too... */
		AH->lookahead[AH->lookaheadLen++] = AH->vmaj;
		AH->lookahead[AH->lookaheadLen++] = AH->vmin;

		/* Check header version; varies from V1.0 */
		if (AH->vmaj > 1 || ((AH->vmaj == 1) && (AH->vmin > 0)))	/* Version > 1.0 */
		{
			if ((byteread = fgetc(fh)) == EOF)
				READ_ERROR_EXIT(fh);

			AH->vrev = byteread;
			AH->lookahead[AH->lookaheadLen++] = AH->vrev;
		}
		else
			AH->vrev = 0;

		/* Make a convenient integer <maj><min><rev>00 */
		AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;

		if ((AH->intSize = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);
		AH->lookahead[AH->lookaheadLen++] = AH->intSize;

		/* offSize field only exists from V1.7 on; older files used intSize */
		if (AH->version >= K_VERS_1_7)
		{
			if ((AH->offSize = fgetc(fh)) == EOF)
				READ_ERROR_EXIT(fh);
			AH->lookahead[AH->lookaheadLen++] = AH->offSize;
		}
		else
			AH->offSize = AH->intSize;

		if ((byteread = fgetc(fh)) == EOF)
			READ_ERROR_EXIT(fh);

		AH->format = byteread;
		AH->lookahead[AH->lookaheadLen++] = AH->format;
	}
	else
	{
		/*
		 * *Maybe* we have a tar archive format file or a text dump ... So,
		 * read first 512 byte header...
		 */
		cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
		/* read failure is checked below */
		AH->lookaheadLen += cnt;

		/* Does it look like a plain-text pg_dump/pg_dumpall script? */
		if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
			(strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
			 strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
		{
			/*
			 * looks like it's probably a text format dump. so suggest they
			 * try psql
			 */
			exit_horribly(modulename, "input file appears to be a text format dump. Please use psql.\n");
		}

		/* A tar header is exactly 512 bytes; anything shorter is bogus */
		if (AH->lookaheadLen != 512)
		{
			if (feof(fh))
				exit_horribly(modulename, "input file does not appear to be a valid archive (too short?)\n");
			else
				READ_ERROR_EXIT(fh);
		}

		if (!isValidTarHeader(AH->lookahead))
			exit_horribly(modulename, "input file does not appear to be a valid archive\n");

		AH->format = archTar;
	}

	/* If we can't seek, then mark the header as read */
	if (fseeko(fh, 0, SEEK_SET) != 0)
	{
		/*
		 * NOTE: Formats that use the lookahead buffer can unset this in their
		 * Init routine.
		 */
		AH->readHeader = 1;
	}
	else
		AH->lookaheadLen = 0;	/* Don't bother since we've reset the file */

	/* Close the file (only if we opened it ourselves, never stdin) */
	if (wantClose)
		if (fclose(fh) != 0)
			exit_horribly(modulename, "could not close input file: %s\n",
						  strerror(errno));

	return AH->format;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Allocate an archive handle
 *
 * Builds and initializes an ArchiveHandle for either dumping or restoring:
 * zero-fills the struct, sets the current archiver version numbers and
 * default string/error-handling state, records the target file spec,
 * creates the circular (self-linked) dummy TOC head entry, and finally
 * invokes the format-specific Init routine.
 *
 * If fmt is archUnknown, the format is sniffed from the file via
 * _discoverArchiveFormat() (restore case).  Any unknown format is fatal.
 */
static ArchiveHandle *
_allocAH(const char *FileSpec, const ArchiveFormat fmt,
		 const int compression, ArchiveMode mode, SetupWorkerPtr setupWorkerPtr)
{
	ArchiveHandle *AH;

#if 0
	write_msg(modulename, "allocating AH for %s, format %d\n", FileSpec, fmt);
#endif

	AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle));

	/* AH->debugLevel = 100; */

	AH->vmaj = K_VERS_MAJOR;
	AH->vmin = K_VERS_MINOR;
	AH->vrev = K_VERS_REV;

	/* Make a convenient integer <maj><min><rev>00 */
	AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;

	/* initialize for backwards compatible string processing */
	AH->public.encoding = 0;	/* PG_SQL_ASCII */
	AH->public.std_strings = false;

	/* sql error handling */
	AH->public.exit_on_error = true;
	AH->public.n_errors = 0;

	AH->archiveDumpVersion = PG_VERSION;

	AH->createDate = time(NULL);

	/* native sizes; overwritten from the header when reading an archive */
	AH->intSize = sizeof(int);
	AH->offSize = sizeof(pgoff_t);
	if (FileSpec)
	{
		AH->fSpec = pg_strdup(FileSpec);

		/*
		 * Not used; maybe later....
		 *
		 * AH->workDir = pg_strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
		 * i--) if (AH->workDir[i-1] == '/')
		 */
	}
	else
		AH->fSpec = NULL;

	AH->currUser = NULL;		/* unknown */
	AH->currSchema = NULL;		/* ditto */
	AH->currTablespace = NULL;	/* ditto */
	AH->currWithOids = -1;		/* force SET */

	/* dummy head entry: the TOC is kept as a circular doubly-linked list */
	AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));

	AH->toc->next = AH->toc;
	AH->toc->prev = AH->toc;

	AH->mode = mode;
	AH->compression = compression;

	/* reset the mini SQL lexer state used for INSERT-style data restore */
	memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse));

	/* Open stdout with no compression for AH output handle */
	AH->gzOut = 0;
	AH->OF = stdout;

	/*
	 * On Windows, we need to use binary mode to read/write non-text archive
	 * formats.  Force stdin/stdout into binary mode if that is what we are
	 * using.
	 */
#ifdef WIN32
	if (fmt != archNull &&
		(AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0))
	{
		if (mode == archModeWrite)
			setmode(fileno(stdout), O_BINARY);
		else
			setmode(fileno(stdin), O_BINARY);
	}
#endif

	AH->SetupWorkerPtr = setupWorkerPtr;

	/* must come after fSpec is set, since discovery reads the file */
	if (fmt == archUnknown)
		AH->format = _discoverArchiveFormat(AH);
	else
		AH->format = fmt;

	AH->promptPassword = TRI_DEFAULT;

	/* hand off to the format module to fill in the function pointers */
	switch (AH->format)
	{
		case archCustom:
			InitArchiveFmt_Custom(AH);
			break;

		case archNull:
			InitArchiveFmt_Null(AH);
			break;

		case archDirectory:
			InitArchiveFmt_Directory(AH);
			break;

		case archTar:
			InitArchiveFmt_Tar(AH);
			break;

		default:
			exit_horribly(modulename, "unrecognized file format \"%d\"\n", fmt);
	}

	return AH;
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * WriteDataChunks
 *
 * Walk the TOC and write out the table-data (and blob) payloads for every
 * entry that has a data-dumper function and is actually requested
 * (REQ_DATA set in te->reqs).
 *
 * In a parallel dump (pstate with more than one worker) each qualifying
 * entry is dispatched to an idle worker process; otherwise the data is
 * written inline via WriteDataChunksForTocEntry().
 *
 * NOTE(review): EnsureWorkersFinished() is called even when pstate is
 * NULL (non-parallel case) — presumably it tolerates that; confirm in
 * parallel.c.
 */
void
WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
{
	TocEntry   *te;

	/* iterate the circular TOC list, skipping the dummy head entry */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if (!te->dataDumper)
			continue;

		if ((te->reqs & REQ_DATA) == 0)
			continue;

		if (pstate && pstate->numWorkers > 1)
		{
			/*
			 * If we are in a parallel backup, then we are always the master
			 * process.
			 */
			EnsureIdleWorker(AH, pstate);
			Assert(GetIdleWorker(pstate) != NO_SLOT);
			DispatchJobForTocEntry(AH, pstate, te, ACT_DUMP);
		}
		else
			WriteDataChunksForTocEntry(AH, te);
	}
	EnsureWorkersFinished(AH, pstate);
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
void
|
|
|
|
WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
StartDataPtr startPtr;
|
|
|
|
EndDataPtr endPtr;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->currToc = te;
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0)
|
|
|
|
{
|
|
|
|
startPtr = AH->StartBlobsPtr;
|
|
|
|
endPtr = AH->EndBlobsPtr;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2013-03-24 16:27:20 +01:00
|
|
|
else
|
|
|
|
{
|
|
|
|
startPtr = AH->StartDataPtr;
|
|
|
|
endPtr = AH->EndDataPtr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (startPtr != NULL)
|
|
|
|
(*startPtr) (AH, te);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The user-provided DataDumper routine needs to call AH->WriteData
|
|
|
|
*/
|
|
|
|
(*te->dataDumper) ((Archive *) AH, te->dataDumperArg);
|
|
|
|
|
|
|
|
if (endPtr != NULL)
|
|
|
|
(*endPtr) (AH, te);
|
|
|
|
|
|
|
|
AH->currToc = NULL;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
 * WriteToc
 *
 * Serialize the table of contents to the archive: first the count of
 * entries that will actually be dumped, then one record per dumped entry.
 * Entries whose reqs mask contains none of REQ_SCHEMA/REQ_DATA/REQ_SPECIAL
 * are omitted from both the count and the records.
 *
 * The per-entry field order here must match ReadToc() exactly; any change
 * requires bumping the archive version.
 */
void
WriteToc(ArchiveHandle *AH)
{
	TocEntry   *te;
	char		workbuf[32];
	int			tocCount;
	int			i;

	/* count entries that will actually be dumped */
	tocCount = 0;
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) != 0)
			tocCount++;
	}

	/* printf("%d TOC Entries to save\n", tocCount); */

	WriteInt(AH, tocCount);

	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		/* skip entries not selected for output, same filter as the count */
		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) == 0)
			continue;

		WriteInt(AH, te->dumpId);
		WriteInt(AH, te->dataDumper ? 1 : 0);

		/* OID is recorded as a string for historical reasons */
		sprintf(workbuf, "%u", te->catalogId.tableoid);
		WriteStr(AH, workbuf);
		sprintf(workbuf, "%u", te->catalogId.oid);
		WriteStr(AH, workbuf);

		WriteStr(AH, te->tag);
		WriteStr(AH, te->desc);
		WriteInt(AH, te->section);
		WriteStr(AH, te->defn);
		WriteStr(AH, te->dropStmt);
		WriteStr(AH, te->copyStmt);
		WriteStr(AH, te->namespace);
		WriteStr(AH, te->tablespace);
		WriteStr(AH, te->owner);
		WriteStr(AH, te->withOids ? "true" : "false");

		/* Dump list of dependencies */
		for (i = 0; i < te->nDeps; i++)
		{
			sprintf(workbuf, "%d", te->dependencies[i]);
			WriteStr(AH, workbuf);
		}
		WriteStr(AH, NULL);		/* Terminate List */

		/* let the format module append its own per-entry data, if any */
		if (AH->WriteExtraTocPtr)
			(*AH->WriteExtraTocPtr) (AH, te);
	}
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
|
|
|
ReadToc(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
int i;
|
2003-12-06 04:00:16 +01:00
|
|
|
char *tmp;
|
|
|
|
DumpId *deps;
|
2001-04-01 07:42:51 +02:00
|
|
|
int depIdx;
|
|
|
|
int depSize;
|
2009-02-02 21:07:37 +01:00
|
|
|
TocEntry *te;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
AH->tocCount = ReadInt(AH);
|
2003-12-06 04:00:16 +01:00
|
|
|
AH->maxDumpId = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
for (i = 0; i < AH->tocCount; i++)
|
|
|
|
{
|
2012-10-02 21:35:10 +02:00
|
|
|
te = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
2003-12-06 04:00:16 +01:00
|
|
|
te->dumpId = ReadInt(AH);
|
|
|
|
|
|
|
|
if (te->dumpId > AH->maxDumpId)
|
|
|
|
AH->maxDumpId = te->dumpId;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
/* Sanity check */
|
2003-12-06 04:00:16 +01:00
|
|
|
if (te->dumpId <= 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename,
|
2012-06-10 21:20:04 +02:00
|
|
|
"entry ID %d out of range -- perhaps a corrupt TOC\n",
|
2012-03-20 22:38:11 +01:00
|
|
|
te->dumpId);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
te->hadDumper = ReadInt(AH);
|
2003-12-06 04:00:16 +01:00
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_8)
|
|
|
|
{
|
|
|
|
tmp = ReadStr(AH);
|
|
|
|
sscanf(tmp, "%u", &te->catalogId.tableoid);
|
|
|
|
free(tmp);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
te->catalogId.tableoid = InvalidOid;
|
|
|
|
tmp = ReadStr(AH);
|
|
|
|
sscanf(tmp, "%u", &te->catalogId.oid);
|
|
|
|
free(tmp);
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2002-07-04 17:35:07 +02:00
|
|
|
te->tag = ReadStr(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
te->desc = ReadStr(AH);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_11)
|
|
|
|
{
|
|
|
|
te->section = ReadInt(AH);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2010-02-18 02:29:10 +01:00
|
|
|
* Rules for pre-8.4 archives wherein pg_dump hasn't classified
|
|
|
|
* the entries into sections. This list need not cover entry
|
|
|
|
* types added later than 8.4.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "COMMENT") == 0 ||
|
2009-10-05 21:24:49 +02:00
|
|
|
strcmp(te->desc, "ACL") == 0 ||
|
2010-02-18 02:29:10 +01:00
|
|
|
strcmp(te->desc, "ACL LANGUAGE") == 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
te->section = SECTION_NONE;
|
|
|
|
else if (strcmp(te->desc, "TABLE DATA") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOBS") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
|
|
te->section = SECTION_DATA;
|
|
|
|
else if (strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "INDEX") == 0 ||
|
|
|
|
strcmp(te->desc, "RULE") == 0 ||
|
|
|
|
strcmp(te->desc, "TRIGGER") == 0)
|
|
|
|
te->section = SECTION_POST_DATA;
|
|
|
|
else
|
|
|
|
te->section = SECTION_PRE_DATA;
|
|
|
|
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
te->defn = ReadStr(AH);
|
|
|
|
te->dropStmt = ReadStr(AH);
|
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_3)
|
|
|
|
te->copyStmt = ReadStr(AH);
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (AH->version >= K_VERS_1_6)
|
|
|
|
te->namespace = ReadStr(AH);
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
if (AH->version >= K_VERS_1_10)
|
|
|
|
te->tablespace = ReadStr(AH);
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
te->owner = ReadStr(AH);
|
2004-03-24 04:06:08 +01:00
|
|
|
if (AH->version >= K_VERS_1_9)
|
|
|
|
{
|
|
|
|
if (strcmp(ReadStr(AH), "true") == 0)
|
|
|
|
te->withOids = true;
|
|
|
|
else
|
|
|
|
te->withOids = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
te->withOids = true;
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2001-04-01 07:42:51 +02:00
|
|
|
/* Read TOC entry dependencies */
|
|
|
|
if (AH->version >= K_VERS_1_5)
|
|
|
|
{
|
|
|
|
depSize = 100;
|
2011-11-25 21:40:51 +01:00
|
|
|
deps = (DumpId *) pg_malloc(sizeof(DumpId) * depSize);
|
2001-04-01 07:42:51 +02:00
|
|
|
depIdx = 0;
|
2003-12-06 04:00:16 +01:00
|
|
|
for (;;)
|
2001-04-01 07:42:51 +02:00
|
|
|
{
|
2003-12-06 04:00:16 +01:00
|
|
|
tmp = ReadStr(AH);
|
|
|
|
if (!tmp)
|
|
|
|
break; /* end of list */
|
2003-05-04 00:18:59 +02:00
|
|
|
if (depIdx >= depSize)
|
2001-04-01 07:42:51 +02:00
|
|
|
{
|
|
|
|
depSize *= 2;
|
2011-11-30 02:41:06 +01:00
|
|
|
deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depSize);
|
2001-04-01 07:42:51 +02:00
|
|
|
}
|
2003-12-06 04:00:16 +01:00
|
|
|
sscanf(tmp, "%d", &deps[depIdx]);
|
|
|
|
free(tmp);
|
|
|
|
depIdx++;
|
|
|
|
}
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
if (depIdx > 0) /* We have a non-null entry */
|
|
|
|
{
|
2011-11-30 02:41:06 +01:00
|
|
|
deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depIdx);
|
2003-12-06 04:00:16 +01:00
|
|
|
te->dependencies = deps;
|
|
|
|
te->nDeps = depIdx;
|
|
|
|
}
|
2001-06-27 23:21:37 +02:00
|
|
|
else
|
2003-05-04 00:18:59 +02:00
|
|
|
{
|
|
|
|
free(deps);
|
2003-12-06 04:00:16 +01:00
|
|
|
te->dependencies = NULL;
|
|
|
|
te->nDeps = 0;
|
2003-05-04 00:18:59 +02:00
|
|
|
}
|
2001-04-01 07:42:51 +02:00
|
|
|
}
|
2001-06-27 23:21:37 +02:00
|
|
|
else
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
|
|
|
te->dependencies = NULL;
|
|
|
|
te->nDeps = 0;
|
|
|
|
}
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->ReadExtraTocPtr)
|
|
|
|
(*AH->ReadExtraTocPtr) (AH, te);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
ahlog(AH, 3, "read TOC entry %d (ID %d) for %s %s\n",
|
|
|
|
i, te->dumpId, te->desc, te->tag);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
/* link completed entry into TOC circular list */
|
2000-07-21 13:40:08 +02:00
|
|
|
te->prev = AH->toc->prev;
|
|
|
|
AH->toc->prev->next = te;
|
|
|
|
AH->toc->prev = te;
|
|
|
|
te->next = AH->toc;
|
2006-05-28 23:13:54 +02:00
|
|
|
|
|
|
|
/* special processing immediately upon read for some items */
|
|
|
|
if (strcmp(te->desc, "ENCODING") == 0)
|
|
|
|
processEncodingEntry(AH, te);
|
|
|
|
else if (strcmp(te->desc, "STDSTRINGS") == 0)
|
|
|
|
processStdStringsEntry(AH, te);
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
static void
|
|
|
|
processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
/* te->defn should have the form SET client_encoding = 'foo'; */
|
2011-11-25 21:40:51 +01:00
|
|
|
char *defn = pg_strdup(te->defn);
|
2006-05-28 23:13:54 +02:00
|
|
|
char *ptr1;
|
|
|
|
char *ptr2 = NULL;
|
|
|
|
int encoding;
|
|
|
|
|
|
|
|
ptr1 = strchr(defn, '\'');
|
|
|
|
if (ptr1)
|
|
|
|
ptr2 = strchr(++ptr1, '\'');
|
|
|
|
if (ptr2)
|
|
|
|
{
|
|
|
|
*ptr2 = '\0';
|
|
|
|
encoding = pg_char_to_encoding(ptr1);
|
|
|
|
if (encoding < 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "unrecognized encoding \"%s\"\n",
|
|
|
|
ptr1);
|
2006-05-28 23:13:54 +02:00
|
|
|
AH->public.encoding = encoding;
|
|
|
|
}
|
|
|
|
else
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "invalid ENCODING item: %s\n",
|
|
|
|
te->defn);
|
2006-05-28 23:13:54 +02:00
|
|
|
|
|
|
|
free(defn);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
/* te->defn should have the form SET standard_conforming_strings = 'x'; */
|
|
|
|
char *ptr1;
|
|
|
|
|
|
|
|
ptr1 = strchr(te->defn, '\'');
|
|
|
|
if (ptr1 && strncmp(ptr1, "'on'", 4) == 0)
|
|
|
|
AH->public.std_strings = true;
|
|
|
|
else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
|
|
|
|
AH->public.std_strings = false;
|
|
|
|
else
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "invalid STDSTRINGS item: %s\n",
|
|
|
|
te->defn);
|
2006-05-28 23:13:54 +02:00
|
|
|
}
|
|
|
|
|
2001-11-04 05:05:36 +01:00
|
|
|
/*
 * Decide whether a TOC entry is wanted, given the restore/dump options.
 *
 * Returns a bitmask of REQ_SCHEMA/REQ_DATA indicating which part(s) of the
 * entry to process, REQ_SPECIAL for the ENCODING/STDSTRINGS pseudo-entries,
 * or 0 to skip the entry entirely.  curSection is the section the TOC scan
 * is currently in (tracked by the caller), used for --section filtering.
 */
static teReqs
_tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt)
{
	teReqs		res = REQ_SCHEMA | REQ_DATA;

	/* ENCODING and STDSTRINGS items are treated specially */
	if (strcmp(te->desc, "ENCODING") == 0 ||
		strcmp(te->desc, "STDSTRINGS") == 0)
		return REQ_SPECIAL;

	/* If it's an ACL, maybe ignore it */
	if (ropt->aclsSkip && _tocEntryIsACL(te))
		return 0;

	/* If it's security labels, maybe ignore it */
	if (ropt->no_security_labels && strcmp(te->desc, "SECURITY LABEL") == 0)
		return 0;

	/* Ignore it if section is not to be dumped/restored */
	switch (curSection)
	{
		case SECTION_PRE_DATA:
			if (!(ropt->dumpSections & DUMP_PRE_DATA))
				return 0;
			break;
		case SECTION_DATA:
			if (!(ropt->dumpSections & DUMP_DATA))
				return 0;
			break;
		case SECTION_POST_DATA:
			if (!(ropt->dumpSections & DUMP_POST_DATA))
				return 0;
			break;
		default:
			/* shouldn't get here, really, but ignore it */
			return 0;
	}

	/* Check options for selective dump/restore */
	if (ropt->schemaNames.head != NULL)
	{
		/* If no namespace is specified, it means all. */
		if (!te->namespace)
			return 0;
		if (!(simple_string_list_member(&ropt->schemaNames, te->namespace)))
			return 0;
	}

	/*
	 * Per-object-type selection (pg_restore -t/-I/-P/-T switches).  An empty
	 * name list for a selected type means "all objects of that type".
	 */
	if (ropt->selTypes)
	{
		if (strcmp(te->desc, "TABLE") == 0 ||
			strcmp(te->desc, "TABLE DATA") == 0)
		{
			if (!ropt->selTable)
				return 0;
			if (ropt->tableNames.head != NULL && (!(simple_string_list_member(&ropt->tableNames, te->tag))))
				return 0;
		}
		else if (strcmp(te->desc, "INDEX") == 0)
		{
			if (!ropt->selIndex)
				return 0;
			if (ropt->indexNames.head != NULL && (!(simple_string_list_member(&ropt->indexNames, te->tag))))
				return 0;
		}
		else if (strcmp(te->desc, "FUNCTION") == 0)
		{
			if (!ropt->selFunction)
				return 0;
			if (ropt->functionNames.head != NULL && (!(simple_string_list_member(&ropt->functionNames, te->tag))))
				return 0;
		}
		else if (strcmp(te->desc, "TRIGGER") == 0)
		{
			if (!ropt->selTrigger)
				return 0;
			if (ropt->triggerNames.head != NULL && (!(simple_string_list_member(&ropt->triggerNames, te->tag))))
				return 0;
		}
		else
			return 0;
	}

	/*
	 * Check if we had a dataDumper. Indicates if the entry is schema or data
	 */
	if (!te->hadDumper)
	{
		/*
		 * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
		 * it is considered a data entry.  We don't need to check for the
		 * BLOBS entry or old-style BLOB COMMENTS, because they will have
		 * hadDumper = true ... but we do need to check new-style BLOB
		 * comments.
		 */
		if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
			strcmp(te->desc, "BLOB") == 0 ||
			(strcmp(te->desc, "ACL") == 0 &&
			 strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
			(strcmp(te->desc, "COMMENT") == 0 &&
			 strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
			(strcmp(te->desc, "SECURITY LABEL") == 0 &&
			 strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
			res = res & REQ_DATA;
		else
			res = res & ~REQ_DATA;
	}

	/*
	 * Special case: <Init> type with <Max OID> tag; this is obsolete and we
	 * always ignore it.
	 */
	if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
		return 0;

	/* Mask it if we only want schema */
	if (ropt->schemaOnly)
		res = res & REQ_SCHEMA;

	/* Mask it if we only want data */
	if (ropt->dataOnly)
		res = res & REQ_DATA;

	/* Mask it if we don't have a schema contribution */
	if (!te->defn || strlen(te->defn) == 0)
		res = res & ~REQ_SCHEMA;

	/* Finally, if there's a per-ID filter, limit based on that as well */
	if (ropt->idWanted && !ropt->idWanted[te->dumpId - 1])
		return 0;

	return res;
}
|
|
|
|
|
2010-02-18 02:29:10 +01:00
|
|
|
/*
|
|
|
|
* Identify TOC entries that are ACLs.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
_tocEntryIsACL(TocEntry *te)
|
|
|
|
{
|
|
|
|
/* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
|
|
|
|
if (strcmp(te->desc, "ACL") == 0 ||
|
|
|
|
strcmp(te->desc, "ACL LANGUAGE") == 0 ||
|
|
|
|
strcmp(te->desc, "DEFAULT ACL") == 0)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/*
|
|
|
|
* Issue SET commands for parameters that we want to have set the same way
|
|
|
|
* at all times during execution of a restore script.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_doSetFixedOutputState(ArchiveHandle *AH)
|
|
|
|
{
|
2013-03-17 04:22:17 +01:00
|
|
|
/* Disable statement_timeout since restore is probably slow */
|
2008-05-04 10:32:21 +02:00
|
|
|
ahprintf(AH, "SET statement_timeout = 0;\n");
|
2008-05-04 05:46:08 +02:00
|
|
|
|
2013-03-17 04:22:17 +01:00
|
|
|
/* Likewise for lock_timeout */
|
|
|
|
ahprintf(AH, "SET lock_timeout = 0;\n");
|
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
/* Select the correct character set encoding */
|
|
|
|
ahprintf(AH, "SET client_encoding = '%s';\n",
|
|
|
|
pg_encoding_to_char(AH->public.encoding));
|
2004-02-24 04:35:19 +01:00
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
/* Select the correct string literal syntax */
|
|
|
|
ahprintf(AH, "SET standard_conforming_strings = %s;\n",
|
|
|
|
AH->public.std_strings ? "on" : "off");
|
2004-02-24 04:35:19 +01:00
|
|
|
|
2009-01-05 17:54:37 +01:00
|
|
|
/* Select the role to be used during restore */
|
|
|
|
if (AH->ropt && AH->ropt->use_role)
|
|
|
|
ahprintf(AH, "SET ROLE %s;\n", fmtId(AH->ropt->use_role));
|
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/* Make sure function checking is disabled */
|
|
|
|
ahprintf(AH, "SET check_function_bodies = false;\n");
|
|
|
|
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
/* Avoid annoying notices etc */
|
|
|
|
ahprintf(AH, "SET client_min_messages = warning;\n");
|
2006-05-28 23:13:54 +02:00
|
|
|
if (!AH->public.std_strings)
|
|
|
|
ahprintf(AH, "SET escape_string_warning = off;\n");
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
ahprintf(AH, "\n");
|
|
|
|
}
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
/*
|
|
|
|
* Issue a SET SESSION AUTHORIZATION command. Caller is responsible
|
2003-09-24 00:48:53 +02:00
|
|
|
* for updating state if appropriate. If user is NULL or an empty string,
|
|
|
|
* the specification DEFAULT will be used.
|
2002-05-11 00:36:27 +02:00
|
|
|
*/
|
|
|
|
static void
|
2002-08-18 11:36:26 +02:00
|
|
|
_doSetSessionAuth(ArchiveHandle *AH, const char *user)
|
2002-05-11 00:36:27 +02:00
|
|
|
{
|
2002-08-18 11:36:26 +02:00
|
|
|
PQExpBuffer cmd = createPQExpBuffer();
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION ");
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2003-09-24 00:48:53 +02:00
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* SQL requires a string literal here. Might as well be correct.
|
2003-09-24 00:48:53 +02:00
|
|
|
*/
|
|
|
|
if (user && *user)
|
2006-05-28 23:13:54 +02:00
|
|
|
appendStringLiteralAHX(cmd, user, AH);
|
2002-08-18 11:36:26 +02:00
|
|
|
else
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(cmd, "DEFAULT");
|
|
|
|
appendPQExpBufferChar(cmd, ';');
|
2002-08-18 11:36:26 +02:00
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
2002-08-18 11:36:26 +02:00
|
|
|
res = PQexec(AH->connection, cmd->data);
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
2012-03-20 22:38:11 +01:00
|
|
|
/* NOT warn_or_exit_horribly... use -O instead to skip this. */
|
|
|
|
exit_horribly(modulename, "could not set session user to \"%s\": %s",
|
|
|
|
user, PQerrorMessage(AH->connection));
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
2002-08-18 11:36:26 +02:00
|
|
|
ahprintf(AH, "%s\n\n", cmd->data);
|
|
|
|
|
|
|
|
destroyPQExpBuffer(cmd);
|
2002-05-11 00:36:27 +02:00
|
|
|
}
|
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2004-03-24 04:06:08 +01:00
|
|
|
/*
|
|
|
|
* Issue a SET default_with_oids command. Caller is responsible
|
|
|
|
* for updating state if appropriate.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_doSetWithOids(ArchiveHandle *AH, const bool withOids)
|
|
|
|
{
|
|
|
|
PQExpBuffer cmd = createPQExpBuffer();
|
|
|
|
|
|
|
|
appendPQExpBuffer(cmd, "SET default_with_oids = %s;", withOids ?
|
2004-08-29 07:07:03 +02:00
|
|
|
"true" : "false");
|
2004-03-24 04:06:08 +01:00
|
|
|
|
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, cmd->data);
|
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
2012-03-20 22:38:11 +01:00
|
|
|
warn_or_exit_horribly(AH, modulename,
|
|
|
|
"could not set default_with_oids: %s",
|
|
|
|
PQerrorMessage(AH->connection));
|
2004-03-24 04:06:08 +01:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
ahprintf(AH, "%s\n\n", cmd->data);
|
|
|
|
|
|
|
|
destroyPQExpBuffer(cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
/*
|
2004-09-10 22:05:18 +02:00
|
|
|
* Issue the commands to connect to the specified database.
|
2001-08-22 22:23:24 +02:00
|
|
|
*
|
|
|
|
* If we're currently restoring right into a database, this will
|
2002-09-04 22:31:48 +02:00
|
|
|
* actually establish a connection. Otherwise it puts a \connect into
|
2001-08-22 22:23:24 +02:00
|
|
|
* the script output.
|
2004-09-10 22:05:18 +02:00
|
|
|
*
|
|
|
|
* NULL dbname implies reconnecting to the current DB (pretty useless).
|
2001-08-22 22:23:24 +02:00
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
static void
|
2004-09-10 22:05:18 +02:00
|
|
|
_reconnectToDB(ArchiveHandle *AH, const char *dbname)
|
2000-07-24 08:24:26 +02:00
|
|
|
{
|
2003-09-24 00:48:53 +02:00
|
|
|
if (RestoringToDB(AH))
|
2004-09-10 22:05:18 +02:00
|
|
|
ReconnectToServer(AH, dbname, NULL);
|
2001-08-22 22:23:24 +02:00
|
|
|
else
|
2002-02-11 01:18:20 +01:00
|
|
|
{
|
|
|
|
PQExpBuffer qry = createPQExpBuffer();
|
|
|
|
|
2004-09-10 22:05:18 +02:00
|
|
|
appendPQExpBuffer(qry, "\\connect %s\n\n",
|
2002-08-18 11:36:26 +02:00
|
|
|
dbname ? fmtId(dbname) : "-");
|
2005-04-30 10:08:51 +02:00
|
|
|
ahprintf(AH, "%s", qry->data);
|
2002-02-11 01:18:20 +01:00
|
|
|
destroyPQExpBuffer(qry);
|
|
|
|
}
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2001-10-25 07:50:21 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* NOTE: currUser keeps track of what the imaginary session user in our
|
|
|
|
* script is. It's now effectively reset to the original userID.
|
2001-10-25 07:50:21 +02:00
|
|
|
*/
|
2001-08-22 22:23:24 +02:00
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currUser = NULL;
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/* don't assume we still know the output schema, tablespace, etc either */
|
2003-09-24 00:48:53 +02:00
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currSchema = NULL;
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
|
|
|
AH->currTablespace = NULL;
|
2004-03-24 04:06:08 +01:00
|
|
|
AH->currWithOids = -1;
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/* re-establish fixed state */
|
|
|
|
_doSetFixedOutputState(AH);
|
2000-08-01 17:51:45 +02:00
|
|
|
}
|
|
|
|
|
2003-09-24 00:48:53 +02:00
|
|
|
/*
|
|
|
|
* Become the specified user, and update state to avoid redundant commands
|
|
|
|
*
|
|
|
|
* NULL or empty argument is taken to mean restoring the session default
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_becomeUser(ArchiveHandle *AH, const char *user)
|
|
|
|
{
|
|
|
|
if (!user)
|
|
|
|
user = ""; /* avoid null pointers */
|
|
|
|
|
|
|
|
if (AH->currUser && strcmp(AH->currUser, user) == 0)
|
|
|
|
return; /* no need to do anything */
|
|
|
|
|
|
|
|
_doSetSessionAuth(AH, user);
|
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* NOTE: currUser keeps track of what the imaginary session user in our
|
|
|
|
* script is
|
2003-09-24 00:48:53 +02:00
|
|
|
*/
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->currUser = pg_strdup(user);
|
2003-09-24 00:48:53 +02:00
|
|
|
}
|
2001-08-22 22:23:24 +02:00
|
|
|
|
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Become the owner of the given TOC entry object. If
|
2001-08-22 22:23:24 +02:00
|
|
|
* changes in ownership are not allowed, this doesn't do anything.
|
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
static void
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeOwner(ArchiveHandle *AH, TocEntry *te)
|
2000-08-01 17:51:45 +02:00
|
|
|
{
|
2004-07-13 05:00:17 +02:00
|
|
|
if (AH->ropt && (AH->ropt->noOwner || !AH->ropt->use_setsessauth))
|
2000-08-01 17:51:45 +02:00
|
|
|
return;
|
|
|
|
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeUser(AH, te->owner);
|
2000-07-24 08:24:26 +02:00
|
|
|
}
|
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2004-03-24 04:06:08 +01:00
|
|
|
/*
|
|
|
|
* Set the proper default_with_oids value for the table.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_setWithOids(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
if (AH->currWithOids != te->withOids)
|
|
|
|
{
|
|
|
|
_doSetWithOids(AH, te->withOids);
|
|
|
|
AH->currWithOids = te->withOids;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
/*
|
|
|
|
* Issue the commands to select the specified schema as the current schema
|
|
|
|
* in the target database.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
|
|
|
|
{
|
2002-05-29 00:26:57 +02:00
|
|
|
PQExpBuffer qry;
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (!schemaName || *schemaName == '\0' ||
|
2006-04-19 18:02:17 +02:00
|
|
|
(AH->currSchema && strcmp(AH->currSchema, schemaName) == 0))
|
2002-05-11 00:36:27 +02:00
|
|
|
return; /* no need to do anything */
|
|
|
|
|
2002-05-29 00:26:57 +02:00
|
|
|
qry = createPQExpBuffer();
|
|
|
|
|
|
|
|
appendPQExpBuffer(qry, "SET search_path = %s",
|
2002-08-18 11:36:26 +02:00
|
|
|
fmtId(schemaName));
|
2002-05-29 00:26:57 +02:00
|
|
|
if (strcmp(schemaName, "pg_catalog") != 0)
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(qry, ", pg_catalog");
|
2002-05-29 00:26:57 +02:00
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, qry->data);
|
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
2012-03-20 22:38:11 +01:00
|
|
|
warn_or_exit_horribly(AH, modulename,
|
|
|
|
"could not set search_path to \"%s\": %s",
|
|
|
|
schemaName, PQerrorMessage(AH->connection));
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
2002-05-29 00:26:57 +02:00
|
|
|
ahprintf(AH, "%s;\n\n", qry->data);
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->currSchema = pg_strdup(schemaName);
|
2002-05-29 00:26:57 +02:00
|
|
|
|
|
|
|
destroyPQExpBuffer(qry);
|
2002-05-11 00:36:27 +02:00
|
|
|
}
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
/*
|
|
|
|
* Issue the commands to select the specified tablespace as the current one
|
|
|
|
* in the target database.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_selectTablespace(ArchiveHandle *AH, const char *tablespace)
|
|
|
|
{
|
|
|
|
PQExpBuffer qry;
|
2005-10-15 04:49:52 +02:00
|
|
|
const char *want,
|
|
|
|
*have;
|
2004-11-06 20:36:02 +01:00
|
|
|
|
2008-03-20 18:36:58 +01:00
|
|
|
/* do nothing in --no-tablespaces mode */
|
|
|
|
if (AH->ropt->noTablespace)
|
|
|
|
return;
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
have = AH->currTablespace;
|
|
|
|
want = tablespace;
|
|
|
|
|
|
|
|
/* no need to do anything for non-tablespace object */
|
|
|
|
if (!want)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (have && strcmp(want, have) == 0)
|
|
|
|
return; /* no need to do anything */
|
|
|
|
|
|
|
|
qry = createPQExpBuffer();
|
|
|
|
|
|
|
|
if (strcmp(want, "") == 0)
|
|
|
|
{
|
|
|
|
/* We want the tablespace to be the database's default */
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(qry, "SET default_tablespace = ''");
|
2004-11-06 20:36:02 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* We want an explicit tablespace */
|
|
|
|
appendPQExpBuffer(qry, "SET default_tablespace = %s", fmtId(want));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, qry->data);
|
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
2012-03-20 22:38:11 +01:00
|
|
|
warn_or_exit_horribly(AH, modulename,
|
2012-06-10 21:20:04 +02:00
|
|
|
"could not set default_tablespace to %s: %s",
|
|
|
|
fmtId(want), PQerrorMessage(AH->connection));
|
2004-11-06 20:36:02 +01:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
ahprintf(AH, "%s;\n\n", qry->data);
|
|
|
|
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->currTablespace = pg_strdup(want);
|
2004-11-06 20:36:02 +01:00
|
|
|
|
|
|
|
destroyPQExpBuffer(qry);
|
|
|
|
}
|
2002-05-11 00:36:27 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
/*
|
|
|
|
* Extract an object description for a TOC entry, and append it to buf.
|
|
|
|
*
|
2013-08-13 17:45:56 +02:00
|
|
|
* This is used for ALTER ... OWNER TO.
|
2004-07-13 05:00:17 +02:00
|
|
|
*/
|
2005-01-11 06:14:13 +01:00
|
|
|
static void
|
2005-01-23 01:03:54 +01:00
|
|
|
_getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
2005-01-11 06:14:13 +01:00
|
|
|
const char *type = te->desc;
|
|
|
|
|
|
|
|
/* Use ALTER TABLE for views and sequences */
|
2013-03-24 16:27:20 +01:00
|
|
|
if (strcmp(type, "VIEW") == 0 || strcmp(type, "SEQUENCE") == 0 ||
|
2013-03-04 01:23:31 +01:00
|
|
|
strcmp(type, "MATERIALIZED VIEW") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
type = "TABLE";
|
|
|
|
|
2013-08-13 17:45:56 +02:00
|
|
|
/* objects that don't require special decoration */
|
2011-02-12 14:54:13 +01:00
|
|
|
if (strcmp(type, "COLLATION") == 0 ||
|
|
|
|
strcmp(type, "CONVERSION") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(type, "DOMAIN") == 0 ||
|
|
|
|
strcmp(type, "TABLE") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(type, "TYPE") == 0 ||
|
2011-01-02 05:48:11 +01:00
|
|
|
strcmp(type, "FOREIGN TABLE") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
|
2013-08-13 17:45:56 +02:00
|
|
|
strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
|
2014-05-06 18:12:18 +02:00
|
|
|
/* non-schema-specified objects */
|
2013-08-13 17:45:56 +02:00
|
|
|
strcmp(type, "DATABASE") == 0 ||
|
2007-03-26 18:58:41 +02:00
|
|
|
strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(type, "SCHEMA") == 0 ||
|
|
|
|
strcmp(type, "FOREIGN DATA WRAPPER") == 0 ||
|
|
|
|
strcmp(type, "SERVER") == 0 ||
|
|
|
|
strcmp(type, "USER MAPPING") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
2013-08-13 17:45:56 +02:00
|
|
|
/* We already know that search_path was set properly */
|
2005-01-11 06:14:13 +01:00
|
|
|
appendPQExpBuffer(buf, "%s %s", type, fmtId(te->tag));
|
|
|
|
return;
|
|
|
|
}
|
2002-08-18 11:36:26 +02:00
|
|
|
|
2010-02-18 02:29:10 +01:00
|
|
|
/* BLOBs just have a name, but it's numeric so must not use fmtId */
|
|
|
|
if (strcmp(type, "BLOB") == 0)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2004-08-29 07:07:03 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* These object types require additional decoration. Fortunately, the
|
|
|
|
* information needed is exactly what's in the DROP command.
|
2004-08-29 07:07:03 +02:00
|
|
|
*/
|
2005-01-11 06:14:13 +01:00
|
|
|
if (strcmp(type, "AGGREGATE") == 0 ||
|
|
|
|
strcmp(type, "FUNCTION") == 0 ||
|
|
|
|
strcmp(type, "OPERATOR") == 0 ||
|
2007-01-23 18:54:50 +01:00
|
|
|
strcmp(type, "OPERATOR CLASS") == 0 ||
|
|
|
|
strcmp(type, "OPERATOR FAMILY") == 0)
|
2004-08-29 07:07:03 +02:00
|
|
|
{
|
2005-01-11 06:14:13 +01:00
|
|
|
/* Chop "DROP " off the front and make a modifiable copy */
|
2011-11-25 21:40:51 +01:00
|
|
|
char *first = pg_strdup(te->dropStmt + 5);
|
2005-01-11 06:14:13 +01:00
|
|
|
char *last;
|
2004-07-13 05:00:17 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
/* point to last character in string */
|
|
|
|
last = first + strlen(first) - 1;
|
2004-07-13 05:00:17 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
/* Strip off any ';' or '\n' at the end */
|
|
|
|
while (last >= first && (*last == '\n' || *last == ';'))
|
|
|
|
last--;
|
|
|
|
*(last + 1) = '\0';
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
appendPQExpBufferStr(buf, first);
|
2004-08-29 07:07:03 +02:00
|
|
|
|
|
|
|
free(first);
|
2005-01-11 06:14:13 +01:00
|
|
|
return;
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
write_msg(modulename, "WARNING: don't know how to set owner for object type %s\n",
|
|
|
|
type);
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2004-08-13 23:37:28 +02:00
|
|
|
_printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2004-08-13 23:37:28 +02:00
|
|
|
/* ACLs are dumped only during acl pass */
|
|
|
|
if (acl_pass)
|
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
if (!_tocEntryIsACL(te))
|
2004-08-13 23:37:28 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
if (_tocEntryIsACL(te))
|
2004-08-13 23:37:28 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid dumping the public schema, as it will already be created ...
|
2005-10-15 04:49:52 +02:00
|
|
|
* unless we are using --clean mode, in which case it's been deleted and
|
2008-01-14 20:27:41 +01:00
|
|
|
* we'd better recreate it. Likewise for its comment, if any.
|
2004-08-13 23:37:28 +02:00
|
|
|
*/
|
2008-01-14 20:27:41 +01:00
|
|
|
if (!ropt->dropSchema)
|
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "SCHEMA") == 0 &&
|
|
|
|
strcmp(te->tag, "public") == 0)
|
|
|
|
return;
|
2008-09-06 01:53:42 +02:00
|
|
|
/* The comment restore would require super-user privs, so avoid it. */
|
2008-01-14 20:27:41 +01:00
|
|
|
if (strcmp(te->desc, "COMMENT") == 0 &&
|
|
|
|
strcmp(te->tag, "SCHEMA public") == 0)
|
|
|
|
return;
|
|
|
|
}
|
2004-08-13 23:37:28 +02:00
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
/* Select owner, schema, and tablespace as necessary */
|
2004-08-13 23:37:28 +02:00
|
|
|
_becomeOwner(AH, te);
|
|
|
|
_selectOutputSchema(AH, te->namespace);
|
2004-11-06 20:36:02 +01:00
|
|
|
_selectTablespace(AH, te->tablespace);
|
2004-08-13 23:37:28 +02:00
|
|
|
|
|
|
|
/* Set up OID mode too */
|
|
|
|
if (strcmp(te->desc, "TABLE") == 0)
|
|
|
|
_setWithOids(AH, te);
|
|
|
|
|
|
|
|
/* Emit header comment for item */
|
2004-08-30 21:44:14 +02:00
|
|
|
if (!AH->noTocComments)
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
2004-08-30 21:44:14 +02:00
|
|
|
const char *pfx;
|
2012-02-23 21:53:09 +01:00
|
|
|
char *sanitized_name;
|
|
|
|
char *sanitized_schema;
|
|
|
|
char *sanitized_owner;
|
2004-08-30 21:44:14 +02:00
|
|
|
|
|
|
|
if (isData)
|
|
|
|
pfx = "Data for ";
|
|
|
|
else
|
|
|
|
pfx = "";
|
|
|
|
|
|
|
|
ahprintf(AH, "--\n");
|
|
|
|
if (AH->public.verbose)
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
2004-08-30 21:44:14 +02:00
|
|
|
ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n",
|
|
|
|
te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
|
|
|
|
if (te->nDeps > 0)
|
|
|
|
{
|
|
|
|
int i;
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2004-08-30 21:44:14 +02:00
|
|
|
ahprintf(AH, "-- Dependencies:");
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
ahprintf(AH, " %d", te->dependencies[i]);
|
|
|
|
ahprintf(AH, "\n");
|
|
|
|
}
|
2003-12-06 04:00:16 +01:00
|
|
|
}
|
2012-02-23 21:53:09 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Zap any line endings embedded in user-supplied fields, to prevent
|
|
|
|
* corruption of the dump (which could, in the worst case, present an
|
|
|
|
* SQL injection vulnerability if someone were to incautiously load a
|
|
|
|
* dump containing objects with maliciously crafted names).
|
|
|
|
*/
|
|
|
|
sanitized_name = replace_line_endings(te->tag);
|
|
|
|
if (te->namespace)
|
|
|
|
sanitized_schema = replace_line_endings(te->namespace);
|
|
|
|
else
|
|
|
|
sanitized_schema = pg_strdup("-");
|
|
|
|
if (!ropt->noOwner)
|
|
|
|
sanitized_owner = replace_line_endings(te->owner);
|
|
|
|
else
|
|
|
|
sanitized_owner = pg_strdup("-");
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s",
|
2012-02-23 21:53:09 +01:00
|
|
|
pfx, sanitized_name, te->desc, sanitized_schema,
|
|
|
|
sanitized_owner);
|
|
|
|
|
|
|
|
free(sanitized_name);
|
|
|
|
free(sanitized_schema);
|
|
|
|
free(sanitized_owner);
|
|
|
|
|
2008-03-20 18:36:58 +01:00
|
|
|
if (te->tablespace && !ropt->noTablespace)
|
2012-02-23 21:53:09 +01:00
|
|
|
{
|
2012-06-10 21:20:04 +02:00
|
|
|
char *sanitized_tablespace;
|
2012-02-23 21:53:09 +01:00
|
|
|
|
|
|
|
sanitized_tablespace = replace_line_endings(te->tablespace);
|
|
|
|
ahprintf(AH, "; Tablespace: %s", sanitized_tablespace);
|
|
|
|
free(sanitized_tablespace);
|
|
|
|
}
|
2004-11-06 20:36:02 +01:00
|
|
|
ahprintf(AH, "\n");
|
|
|
|
|
2005-10-15 04:49:52 +02:00
|
|
|
if (AH->PrintExtraTocPtr !=NULL)
|
2004-08-30 21:44:14 +02:00
|
|
|
(*AH->PrintExtraTocPtr) (AH, te);
|
|
|
|
ahprintf(AH, "--\n\n");
|
2003-12-06 04:00:16 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2004-08-13 23:37:28 +02:00
|
|
|
/*
|
|
|
|
* Actually print the definition.
|
|
|
|
*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Really crude hack for suppressing AUTHORIZATION clause that old pg_dump
|
|
|
|
* versions put into CREATE SCHEMA. We have to do this when --no-owner
|
|
|
|
* mode is selected. This is ugly, but I see no other good way ...
|
2004-08-13 23:37:28 +02:00
|
|
|
*/
|
2005-01-11 06:14:13 +01:00
|
|
|
if (ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
2005-01-11 06:14:13 +01:00
|
|
|
ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", fmtId(te->tag));
|
2003-09-24 01:31:52 +02:00
|
|
|
}
|
2004-08-13 23:37:28 +02:00
|
|
|
else
|
2003-09-24 01:31:52 +02:00
|
|
|
{
|
2004-08-13 23:37:28 +02:00
|
|
|
if (strlen(te->defn) > 0)
|
|
|
|
ahprintf(AH, "%s\n\n", te->defn);
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
2004-08-13 23:37:28 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we aren't using SET SESSION AUTH to determine ownership, we must
|
2005-01-11 06:14:13 +01:00
|
|
|
* instead issue an ALTER OWNER command. We assume that anything without
|
|
|
|
* a DROP command is not a separately ownable object. All the categories
|
|
|
|
* with DROP commands must appear in one list or the other.
|
2004-08-13 23:37:28 +02:00
|
|
|
*/
|
|
|
|
if (!ropt->noOwner && !ropt->use_setsessauth &&
|
2005-01-11 06:14:13 +01:00
|
|
|
strlen(te->owner) > 0 && strlen(te->dropStmt) > 0)
|
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "AGGREGATE") == 0 ||
|
2010-02-18 02:29:10 +01:00
|
|
|
strcmp(te->desc, "BLOB") == 0 ||
|
2011-02-12 14:54:13 +01:00
|
|
|
strcmp(te->desc, "COLLATION") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "CONVERSION") == 0 ||
|
|
|
|
strcmp(te->desc, "DATABASE") == 0 ||
|
|
|
|
strcmp(te->desc, "DOMAIN") == 0 ||
|
|
|
|
strcmp(te->desc, "FUNCTION") == 0 ||
|
|
|
|
strcmp(te->desc, "OPERATOR") == 0 ||
|
|
|
|
strcmp(te->desc, "OPERATOR CLASS") == 0 ||
|
2007-01-23 18:54:50 +01:00
|
|
|
strcmp(te->desc, "OPERATOR FAMILY") == 0 ||
|
2007-03-26 18:58:41 +02:00
|
|
|
strcmp(te->desc, "PROCEDURAL LANGUAGE") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "SCHEMA") == 0 ||
|
|
|
|
strcmp(te->desc, "TABLE") == 0 ||
|
|
|
|
strcmp(te->desc, "TYPE") == 0 ||
|
|
|
|
strcmp(te->desc, "VIEW") == 0 ||
|
2013-03-04 01:23:31 +01:00
|
|
|
strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(te->desc, "SEQUENCE") == 0 ||
|
2011-01-02 05:48:11 +01:00
|
|
|
strcmp(te->desc, "FOREIGN TABLE") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(te->desc, "TEXT SEARCH DICTIONARY") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(te->desc, "TEXT SEARCH CONFIGURATION") == 0 ||
|
|
|
|
strcmp(te->desc, "FOREIGN DATA WRAPPER") == 0 ||
|
|
|
|
strcmp(te->desc, "SERVER") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
|
|
|
PQExpBuffer temp = createPQExpBuffer();
|
|
|
|
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(temp, "ALTER ");
|
2005-01-23 01:03:54 +01:00
|
|
|
_getObjectDescription(temp, te, AH);
|
2005-01-11 06:14:13 +01:00
|
|
|
appendPQExpBuffer(temp, " OWNER TO %s;", fmtId(te->owner));
|
|
|
|
ahprintf(AH, "%s\n\n", temp->data);
|
|
|
|
destroyPQExpBuffer(temp);
|
|
|
|
}
|
|
|
|
else if (strcmp(te->desc, "CAST") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
2005-08-22 21:40:37 +02:00
|
|
|
strcmp(te->desc, "CONSTRAINT") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "DEFAULT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
2005-08-22 21:40:37 +02:00
|
|
|
strcmp(te->desc, "INDEX") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "RULE") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(te->desc, "TRIGGER") == 0 ||
|
|
|
|
strcmp(te->desc, "USER MAPPING") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
|
|
|
/* these object types don't have separate owners */
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
write_msg(modulename, "WARNING: don't know how to set owner for object type %s\n",
|
|
|
|
te->desc);
|
|
|
|
}
|
2003-09-24 01:31:52 +02:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2004-07-19 23:02:17 +02:00
|
|
|
/*
|
|
|
|
* If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
|
2005-10-15 04:49:52 +02:00
|
|
|
* commands, so we can no longer assume we know the current auth setting.
|
2004-07-19 23:02:17 +02:00
|
|
|
*/
|
2010-02-18 02:29:10 +01:00
|
|
|
if (acl_pass)
|
2004-07-19 23:02:17 +02:00
|
|
|
{
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
|
|
|
AH->currUser = NULL;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2012-02-23 21:53:09 +01:00
|
|
|
/*
 * Sanitize a string to be included in an SQL comment, by replacing any
 * newlines with spaces.
 *
 * Returns a freshly pg_strdup'd copy; the caller must free it.
 */
static char *
replace_line_endings(const char *str)
{
	char	   *copy = pg_strdup(str);
	char	   *p;

	for (p = copy; *p != '\0'; p++)
	{
		if (*p == '\r' || *p == '\n')
			*p = ' ';
	}

	return copy;
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
|
|
|
WriteHead(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
struct tm crtm;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
(*AH->WriteBufPtr) (AH, "PGDMP", 5); /* Magic code */
|
|
|
|
(*AH->WriteBytePtr) (AH, AH->vmaj);
|
|
|
|
(*AH->WriteBytePtr) (AH, AH->vmin);
|
|
|
|
(*AH->WriteBytePtr) (AH, AH->vrev);
|
|
|
|
(*AH->WriteBytePtr) (AH, AH->intSize);
|
2002-10-22 21:15:23 +02:00
|
|
|
(*AH->WriteBytePtr) (AH, AH->offSize);
|
2001-03-22 05:01:46 +01:00
|
|
|
(*AH->WriteBytePtr) (AH, AH->format);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-06 20:39:39 +02:00
|
|
|
#ifndef HAVE_LIBZ
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->compression != 0)
|
2001-06-27 23:21:37 +02:00
|
|
|
write_msg(modulename, "WARNING: requested compression not available in this "
|
2003-07-23 10:47:41 +02:00
|
|
|
"installation -- archive will be uncompressed\n");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
AH->compression = 0;
|
2000-07-21 13:40:08 +02:00
|
|
|
#endif
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
WriteInt(AH, AH->compression);
|
|
|
|
|
|
|
|
crtm = *localtime(&AH->createDate);
|
|
|
|
WriteInt(AH, crtm.tm_sec);
|
|
|
|
WriteInt(AH, crtm.tm_min);
|
|
|
|
WriteInt(AH, crtm.tm_hour);
|
|
|
|
WriteInt(AH, crtm.tm_mday);
|
|
|
|
WriteInt(AH, crtm.tm_mon);
|
|
|
|
WriteInt(AH, crtm.tm_year);
|
|
|
|
WriteInt(AH, crtm.tm_isdst);
|
2002-08-10 18:57:32 +02:00
|
|
|
WriteStr(AH, PQdb(AH->connection));
|
2004-11-06 20:36:02 +01:00
|
|
|
WriteStr(AH, AH->public.remoteVersionStr);
|
|
|
|
WriteStr(AH, PG_VERSION);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
|
|
|
ReadHead(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
char tmpMag[7];
|
|
|
|
int fmt;
|
2000-07-21 13:40:08 +02:00
|
|
|
struct tm crtm;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2010-06-28 04:07:02 +02:00
|
|
|
/*
|
|
|
|
* If we haven't already read the header, do so.
|
|
|
|
*
|
2014-05-06 18:12:18 +02:00
|
|
|
* NB: this code must agree with _discoverArchiveFormat(). Maybe find a
|
2010-07-06 21:19:02 +02:00
|
|
|
* way to unify the cases?
|
2010-06-28 04:07:02 +02:00
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
if (!AH->readHeader)
|
|
|
|
{
|
2014-05-06 02:27:16 +02:00
|
|
|
(*AH->ReadBufPtr) (AH, tmpMag, 5);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (strncmp(tmpMag, "PGDMP", 5) != 0)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "did not find magic string in file header\n");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
AH->vmaj = (*AH->ReadBytePtr) (AH);
|
|
|
|
AH->vmin = (*AH->ReadBytePtr) (AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->vmaj > 1 || ((AH->vmaj == 1) && (AH->vmin > 0))) /* Version > 1.0 */
|
|
|
|
AH->vrev = (*AH->ReadBytePtr) (AH);
|
|
|
|
else
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->vrev = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "unsupported version (%d.%d) in file header\n",
|
|
|
|
AH->vmaj, AH->vmin);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
AH->intSize = (*AH->ReadBytePtr) (AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->intSize > 32)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "sanity check on integer size (%lu) failed\n",
|
|
|
|
(unsigned long) AH->intSize);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->intSize > sizeof(int))
|
Wording cleanup for error messages. Also change can't -> cannot.
Standard English uses "may", "can", and "might" in different ways:
may - permission, "You may borrow my rake."
can - ability, "I can lift that log."
might - possibility, "It might rain today."
Unfortunately, in conversational English, their use is often mixed, as
in, "You may use this variable to do X", when in fact, "can" is a better
choice. Similarly, "It may crash" is better stated, "It might crash".
2007-02-01 20:10:30 +01:00
|
|
|
write_msg(modulename, "WARNING: archive was made on a machine with larger integers, some operations might fail\n");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
if (AH->version >= K_VERS_1_7)
|
2003-08-04 02:43:34 +02:00
|
|
|
AH->offSize = (*AH->ReadBytePtr) (AH);
|
2002-10-22 21:15:23 +02:00
|
|
|
else
|
2003-08-04 02:43:34 +02:00
|
|
|
AH->offSize = AH->intSize;
|
2002-10-22 21:15:23 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
fmt = (*AH->ReadBytePtr) (AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->format != fmt)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "expected format (%d) differs from format found in file (%d)\n",
|
|
|
|
AH->format, fmt);
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->version >= K_VERS_1_2)
|
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version < K_VERS_1_4)
|
2001-03-22 05:01:46 +01:00
|
|
|
AH->compression = (*AH->ReadBytePtr) (AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
|
|
|
AH->compression = ReadInt(AH);
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
|
|
|
else
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->compression = Z_DEFAULT_COMPRESSION;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-06 20:39:39 +02:00
|
|
|
#ifndef HAVE_LIBZ
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->compression != 0)
|
2003-07-23 10:47:41 +02:00
|
|
|
write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
|
2000-07-04 16:25:28 +02:00
|
|
|
#endif
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version >= K_VERS_1_4)
|
|
|
|
{
|
|
|
|
crtm.tm_sec = ReadInt(AH);
|
|
|
|
crtm.tm_min = ReadInt(AH);
|
|
|
|
crtm.tm_hour = ReadInt(AH);
|
|
|
|
crtm.tm_mday = ReadInt(AH);
|
|
|
|
crtm.tm_mon = ReadInt(AH);
|
|
|
|
crtm.tm_year = ReadInt(AH);
|
|
|
|
crtm.tm_isdst = ReadInt(AH);
|
|
|
|
|
|
|
|
AH->archdbname = ReadStr(AH);
|
|
|
|
|
|
|
|
AH->createDate = mktime(&crtm);
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
if (AH->createDate == (time_t) -1)
|
2001-07-03 22:21:50 +02:00
|
|
|
write_msg(modulename, "WARNING: invalid creation date in header\n");
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
if (AH->version >= K_VERS_1_10)
|
|
|
|
{
|
|
|
|
AH->archiveRemoteVersion = ReadStr(AH);
|
|
|
|
AH->archiveDumpVersion = ReadStr(AH);
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-10-25 03:33:17 +02:00
|
|
|
/*
|
|
|
|
* checkSeek
|
2010-06-28 04:07:02 +02:00
|
|
|
* check to see if ftell/fseek can be performed.
|
2002-10-25 03:33:17 +02:00
|
|
|
*/
|
|
|
|
bool
|
|
|
|
checkSeek(FILE *fp)
|
|
|
|
{
|
2010-06-28 04:07:02 +02:00
|
|
|
pgoff_t tpos;
|
|
|
|
|
|
|
|
/*
|
2010-07-06 21:19:02 +02:00
|
|
|
* If pgoff_t is wider than long, we must have "real" fseeko and not an
|
|
|
|
* emulation using fseek. Otherwise report no seek capability.
|
2010-06-28 04:07:02 +02:00
|
|
|
*/
|
|
|
|
#ifndef HAVE_FSEEKO
|
|
|
|
if (sizeof(pgoff_t) > sizeof(long))
|
2002-10-25 03:33:17 +02:00
|
|
|
return false;
|
|
|
|
#endif
|
2010-06-28 04:07:02 +02:00
|
|
|
|
|
|
|
/* Check that ftello works on this file */
|
|
|
|
tpos = ftello(fp);
|
2014-02-10 00:28:14 +01:00
|
|
|
if (tpos < 0)
|
2010-06-28 04:07:02 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
|
2010-06-28 04:07:02 +02:00
|
|
|
* this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
|
|
|
|
* successful no-op even on files that are otherwise unseekable.
|
|
|
|
*/
|
|
|
|
if (fseeko(fp, tpos, SEEK_SET) != 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
2002-10-25 03:33:17 +02:00
|
|
|
}
|
2005-04-15 18:40:36 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dumpTimestamp
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
|
|
|
|
{
|
|
|
|
char buf[256];
|
|
|
|
|
2006-11-21 23:19:46 +01:00
|
|
|
/*
|
|
|
|
* We don't print the timezone on Win32, because the names are long and
|
|
|
|
* localized, which means they may contain characters in various random
|
2007-11-15 22:14:46 +01:00
|
|
|
* encodings; this has been seen to cause encoding errors when reading the
|
|
|
|
* dump script.
|
2006-11-21 23:19:46 +01:00
|
|
|
*/
|
|
|
|
if (strftime(buf, sizeof(buf),
|
|
|
|
#ifndef WIN32
|
|
|
|
"%Y-%m-%d %H:%M:%S %Z",
|
|
|
|
#else
|
|
|
|
"%Y-%m-%d %H:%M:%S",
|
|
|
|
#endif
|
|
|
|
localtime(&tim)) != 0)
|
2005-04-15 18:40:36 +02:00
|
|
|
ahprintf(AH, "-- %s %s\n\n", msg, buf);
|
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/*
 * First phase of parallel restore.
 *
 * Process all SECTION_PRE_DATA TOC entries serially, in the parent's single
 * connection, just as for a standard restore; then close that connection so
 * it does not count against the allowed number of parallel connections.
 * DATA/POST_DATA entries are left for the parallel worker phase
 * (restore_toc_entries_parallel), and ACLs are handled later still, back in
 * RestoreArchive.
 */
static void
restore_toc_entries_prefork(ArchiveHandle *AH)
{
	RestoreOptions *ropt = AH->ropt;
	bool		skipped_some;
	TocEntry   *next_work_item;

	ahlog(AH, 2, "entering restore_toc_entries_prefork\n");

	/* Adjust dependency information */
	fix_dependencies(AH);

	/*
	 * Do all the early stuff in a single connection in the parent. There's no
	 * great point in running it in parallel, in fact it will actually run
	 * faster in a single connection because we avoid all the connection and
	 * setup overhead.  Also, pre-9.2 pg_dump versions were not very good
	 * about showing all the dependencies of SECTION_PRE_DATA items, so we do
	 * not risk trying to process them out-of-order.
	 *
	 * Note: as of 9.2, it should be guaranteed that all PRE_DATA items appear
	 * before DATA items, and all DATA items before POST_DATA items. That is
	 * not certain to be true in older archives, though, so this loop is coded
	 * to not assume it.
	 */
	skipped_some = false;
	for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
	{
		/* NB: process-or-continue logic must be the inverse of loop below */
		if (next_work_item->section != SECTION_PRE_DATA)
		{
			/* DATA and POST_DATA items are just ignored for now */
			if (next_work_item->section == SECTION_DATA ||
				next_work_item->section == SECTION_POST_DATA)
			{
				skipped_some = true;
				continue;
			}
			else
			{
				/*
				 * SECTION_NONE items, such as comments, can be processed now
				 * if we are still in the PRE_DATA part of the archive. Once
				 * we've skipped any items, we have to consider whether the
				 * comment's dependencies are satisfied, so skip it for now.
				 */
				if (skipped_some)
					continue;
			}
		}

		ahlog(AH, 1, "processing item %d %s %s\n",
			  next_work_item->dumpId,
			  next_work_item->desc, next_work_item->tag);

		(void) restore_toc_entry(AH, next_work_item, ropt, false);

		/* there should be no touch of ready_list here, so pass NULL */
		reduce_dependencies(AH, next_work_item, NULL);
	}

	/*
	 * Now close parent connection in prep for parallel steps. We do this
	 * mainly to ensure that we don't exceed the specified number of parallel
	 * connections.
	 */
	DisconnectDatabase(&AH->public);

	/* blow away any transient state from the old connection */
	if (AH->currUser)
		free(AH->currUser);
	AH->currUser = NULL;
	if (AH->currSchema)
		free(AH->currSchema);
	AH->currSchema = NULL;
	if (AH->currTablespace)
		free(AH->currTablespace);
	AH->currTablespace = NULL;
	AH->currWithOids = -1;
}
|
|
|
|
|
|
|
|
/*
 * Main engine for parallel restore.
 *
 * Work is done in three phases.
 * First we process all SECTION_PRE_DATA tocEntries, in a single connection,
 * just as for a standard restore. This is done in restore_toc_entries_prefork().
 * Second we process the remaining non-ACL steps in parallel worker children
 * (threads on Windows, processes on Unix), these fork off and set up their
 * connections before we call restore_toc_entries_parallel_forked.
 * Finally we process all the ACL entries in a single connection (that happens
 * back in RestoreArchive).
 */
static void
restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
							 TocEntry *pending_list)
{
	int			work_status;
	bool		skipped_some;
	TocEntry	ready_list;		/* dummy header of the ready-to-run list */
	TocEntry   *next_work_item;
	int			ret_child;		/* worker slot number of a reaped worker */

	ahlog(AH, 2, "entering restore_toc_entries_parallel\n");

	/*
	 * Initialize the lists of ready items, the list for pending items has
	 * already been initialized in the caller.  After this setup, the pending
	 * list is everything that needs to be done but is blocked by one or more
	 * dependencies, while the ready list contains items that have no
	 * remaining dependencies. Note: we don't yet filter out entries that
	 * aren't going to be restored. They might participate in dependency
	 * chains connecting entries that should be restored, so we treat them as
	 * live until we actually process them.
	 */
	par_list_header_init(&ready_list);
	skipped_some = false;
	for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
	{
		/* NB: process-or-continue logic must be the inverse of loop above */
		if (next_work_item->section == SECTION_PRE_DATA)
		{
			/* All PRE_DATA items were dealt with above */
			continue;
		}
		if (next_work_item->section == SECTION_DATA ||
			next_work_item->section == SECTION_POST_DATA)
		{
			/* set this flag at same point that previous loop did */
			skipped_some = true;
		}
		else
		{
			/* SECTION_NONE items must be processed if previous loop didn't */
			if (!skipped_some)
				continue;
		}

		/* sort into pending (still blocked) vs. ready (runnable now) */
		if (next_work_item->depCount > 0)
			par_list_append(pending_list, next_work_item);
		else
			par_list_append(&ready_list, next_work_item);
	}

	/*
	 * main parent loop
	 *
	 * Keep going until there is no worker still running AND there is no work
	 * left to be done.
	 */

	ahlog(AH, 1, "entering main parallel loop\n");

	while ((next_work_item = get_next_work_item(AH, &ready_list, pstate)) != NULL ||
		   !IsEveryWorkerIdle(pstate))
	{
		if (next_work_item != NULL)
		{
			/* If not to be restored, don't waste time launching a worker */
			if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0 ||
				_tocEntryIsACL(next_work_item))
			{
				ahlog(AH, 1, "skipping item %d %s %s\n",
					  next_work_item->dumpId,
					  next_work_item->desc, next_work_item->tag);

				/* still must unblock its dependents, even though skipped */
				par_list_remove(next_work_item);
				reduce_dependencies(AH, next_work_item, &ready_list);

				continue;
			}

			ahlog(AH, 1, "launching item %d %s %s\n",
				  next_work_item->dumpId,
				  next_work_item->desc, next_work_item->tag);

			par_list_remove(next_work_item);

			/* get_next_work_item should only succeed when a worker is free */
			Assert(GetIdleWorker(pstate) != NO_SLOT);
			DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE);
		}
		else
		{
			/* at least one child is working and we have nothing ready. */
			Assert(!IsEveryWorkerIdle(pstate));
		}

		for (;;)
		{
			int			nTerm = 0;

			/*
			 * In order to reduce dependencies as soon as possible and
			 * especially to reap the status of workers who are working on
			 * items that pending items depend on, we do a non-blocking check
			 * for ended workers first.
			 *
			 * However, if we do not have any other work items currently that
			 * workers can work on, we do not busy-loop here but instead
			 * really wait for at least one worker to terminate. Hence we call
			 * ListenToWorkers(..., ..., do_wait = true) in this case.
			 */
			ListenToWorkers(AH, pstate, !next_work_item);

			while ((ret_child = ReapWorkerStatus(pstate, &work_status)) != NO_SLOT)
			{
				nTerm++;
				mark_work_done(AH, &ready_list, ret_child, work_status, pstate);
			}

			/*
			 * We need to make sure that we have an idle worker before
			 * re-running the loop. If nTerm > 0 we already have that (quick
			 * check).
			 */
			if (nTerm > 0)
				break;

			/* if nobody terminated, explicitly check for an idle worker */
			if (GetIdleWorker(pstate) != NO_SLOT)
				break;

			/*
			 * If we have no idle worker, read the result of one or more
			 * workers and loop the loop to call ReapWorkerStatus() on them.
			 */
			ListenToWorkers(AH, pstate, true);
		}
	}

	ahlog(AH, 1, "finished main parallel loop\n");
}
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
 * Final phase of parallel restore: run in the parent after all workers are
 * done.  Reconnects the parent's single connection and serially restores any
 * items the parallel phase could not schedule (e.g. due to circular
 * dependencies).  ACL entries are still left for RestoreArchive.
 */
static void
restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
{
	RestoreOptions *ropt = AH->ropt;
	TocEntry   *te;

	ahlog(AH, 2, "entering restore_toc_entries_postfork\n");

	/*
	 * Now reconnect the single parent connection.
	 */
	ConnectDatabase((Archive *) AH, ropt->dbname,
					ropt->pghost, ropt->pgport, ropt->username,
					ropt->promptPassword);

	/* re-establish fixed session state lost with the old connection */
	_doSetFixedOutputState(AH);

	/*
	 * Make sure there is no non-ACL work left due to, say, circular
	 * dependencies, or some other pathological condition. If so, do it in the
	 * single parent connection.
	 */
	for (te = pending_list->par_next; te != pending_list; te = te->par_next)
	{
		ahlog(AH, 1, "processing missed item %d %s %s\n",
			  te->dumpId, te->desc, te->tag);
		(void) restore_toc_entry(AH, te, ropt, false);
	}

	/* The ACLs will be handled back in RestoreArchive. */
}
|
|
|
|
|
2009-04-12 23:02:44 +02:00
|
|
|
/*
|
|
|
|
* Check if te1 has an exclusive lock requirement for an item that te2 also
|
|
|
|
* requires, whether or not te2's requirement is for an exclusive lock.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
has_lock_conflicts(TocEntry *te1, TocEntry *te2)
|
|
|
|
{
|
2009-06-11 16:49:15 +02:00
|
|
|
int j,
|
|
|
|
k;
|
2009-04-12 23:02:44 +02:00
|
|
|
|
|
|
|
for (j = 0; j < te1->nLockDeps; j++)
|
|
|
|
{
|
|
|
|
for (k = 0; k < te2->nDeps; k++)
|
|
|
|
{
|
|
|
|
if (te1->lockDeps[j] == te2->dependencies[k])
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-08-08 00:48:34 +02:00
|
|
|
/*
|
|
|
|
* Initialize the header of a parallel-processing list.
|
|
|
|
*
|
|
|
|
* These are circular lists with a dummy TocEntry as header, just like the
|
|
|
|
* main TOC list; but we use separate list links so that an entry can be in
|
|
|
|
* the main TOC list as well as in a parallel-processing list.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
par_list_header_init(TocEntry *l)
|
|
|
|
{
|
|
|
|
l->par_prev = l->par_next = l;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Append te to the end of the parallel-processing list headed by l */
|
|
|
|
static void
|
|
|
|
par_list_append(TocEntry *l, TocEntry *te)
|
|
|
|
{
|
|
|
|
te->par_prev = l->par_prev;
|
|
|
|
l->par_prev->par_next = te;
|
|
|
|
l->par_prev = te;
|
|
|
|
te->par_next = l;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove te from whatever parallel-processing list it's in */
|
|
|
|
static void
|
|
|
|
par_list_remove(TocEntry *te)
|
|
|
|
{
|
|
|
|
te->par_prev->par_next = te->par_next;
|
|
|
|
te->par_next->par_prev = te->par_prev;
|
|
|
|
te->par_prev = NULL;
|
|
|
|
te->par_next = NULL;
|
|
|
|
}
|
|
|
|
|
2009-04-12 23:02:44 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
 * Find the next work item (if any) that is capable of being run now.
 *
 * To qualify, the item must have no remaining dependencies
 * and no requirements for locks that are incompatible with
 * items currently running. Items in the ready_list are known to have
 * no remaining dependencies, but we have to check for lock conflicts.
 *
 * Note that the returned item has *not* been removed from ready_list.
 * The caller must do that after successfully dispatching the item.
 *
 * pref_non_data is for an alternative selection algorithm that gives
 * preference to non-data items if there is already a data load running.
 * It is currently disabled.
 *
 * Returns NULL if nothing in ready_list can be scheduled right now.
 */
static TocEntry *
get_next_work_item(ArchiveHandle *AH, TocEntry *ready_list,
				   ParallelState *pstate)
{
	bool		pref_non_data = false;	/* or get from AH->ropt */
	TocEntry   *data_te = NULL;	/* fallback DATA item if pref_non_data */
	TocEntry   *te;
	int			i,
				k;

	/*
	 * Bogus heuristics for pref_non_data
	 */
	if (pref_non_data)
	{
		int			count = 0;

		/* count workers currently busy with SECTION_DATA items */
		for (k = 0; k < pstate->numWorkers; k++)
			if (pstate->parallelSlot[k].args->te != NULL &&
				pstate->parallelSlot[k].args->te->section == SECTION_DATA)
				count++;
		/* only prefer non-data when >= 1/4 of workers are loading data */
		if (pstate->numWorkers == 0 || count * 4 < pstate->numWorkers)
			pref_non_data = false;
	}

	/*
	 * Search the ready_list until we find a suitable item.
	 */
	for (te = ready_list->par_next; te != ready_list; te = te->par_next)
	{
		bool		conflicts = false;

		/*
		 * Check to see if the item would need exclusive lock on something
		 * that a currently running item also needs lock on, or vice versa. If
		 * so, we don't want to schedule them together.
		 */
		for (i = 0; i < pstate->numWorkers && !conflicts; i++)
		{
			TocEntry   *running_te;

			if (pstate->parallelSlot[i].workerStatus != WRKR_WORKING)
				continue;
			running_te = pstate->parallelSlot[i].args->te;

			/* conflict test must be symmetric: check both directions */
			if (has_lock_conflicts(te, running_te) ||
				has_lock_conflicts(running_te, te))
			{
				conflicts = true;
				break;
			}
		}

		if (conflicts)
			continue;

		if (pref_non_data && te->section == SECTION_DATA)
		{
			/* remember first conflict-free DATA item as a last resort */
			if (data_te == NULL)
				data_te = te;
			continue;
		}

		/* passed all tests, so this item can run */
		return te;
	}

	/* nothing but a DATA item qualified; use it rather than stall */
	if (data_te != NULL)
		return data_te;

	ahlog(AH, 2, "no item ready\n");
	return NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore a single TOC item in parallel with others
|
|
|
|
*
|
2013-03-24 16:27:20 +01:00
|
|
|
* this is run in the worker, i.e. in a thread (Windows) or a separate process
|
|
|
|
* (everything else). A worker process executes several such work items during
|
|
|
|
* a parallel backup or restore. Once we terminate here and report back that
|
|
|
|
* our work is finished, the master process will assign us a new work item.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
int
|
2013-05-29 22:58:43 +02:00
|
|
|
parallel_restore(ParallelArgs *args)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = args->AH;
|
2009-06-11 16:49:15 +02:00
|
|
|
TocEntry *te = args->te;
|
2009-02-02 21:07:37 +01:00
|
|
|
RestoreOptions *ropt = AH->ropt;
|
2013-03-24 16:27:20 +01:00
|
|
|
int status;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
_doSetFixedOutputState(AH);
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
Assert(AH->connection != NULL);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->public.n_errors = 0;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/* Restore the TOC item */
|
|
|
|
status = restore_toc_entry(AH, te, ropt, true);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
return status;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Housekeeping to be done after a step has been parallel restored.
 *
 * Clear the appropriate slot, free all the extra memory we allocated,
 * update status, and reduce the dependency count of any dependent items.
 */
static void
mark_work_done(ArchiveHandle *AH, TocEntry *ready_list,
			   int worker, int status,
			   ParallelState *pstate)
{
	TocEntry   *te = NULL;

	/* the TOC entry this worker slot was processing */
	te = pstate->parallelSlot[worker].args->te;

	if (te == NULL)
		exit_horribly(modulename, "could not find slot of finished worker\n");

	ahlog(AH, 1, "finished item %d %s %s\n",
		  te->dumpId, te->desc, te->tag);

	/* translate the worker's exit status into bookkeeping actions */
	if (status == WORKER_CREATE_DONE)
		mark_create_done(AH, te);
	else if (status == WORKER_INHIBIT_DATA)
	{
		/* CREATE failed: suppress loading data for this table */
		inhibit_data_for_failed_table(AH, te);
		AH->public.n_errors++;
	}
	else if (status == WORKER_IGNORED_ERRORS)
		AH->public.n_errors++;
	else if (status != 0)
		exit_horribly(modulename, "worker process failed: exit code %d\n",
					  status);

	/* unblock any items that were waiting on this one */
	reduce_dependencies(AH, te, ready_list);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Process the dependency information into a form useful for parallel restore.
 *
 * This function takes care of fixing up some missing or badly designed
 * dependencies, and then prepares subsidiary data structures that will be
 * used in the main parallel-restore logic, including:
 * 1. We build the revDeps[] arrays of incoming dependency dumpIds.
 * 2. We set up depCount fields that are the number of as-yet-unprocessed
 * dependencies for each TOC entry.
 *
 * We also identify locking dependencies so that we can avoid trying to
 * schedule conflicting items at the same time.
 */
static void
fix_dependencies(ArchiveHandle *AH)
{
	TocEntry   *te;
	int			i;

	/*
	 * Initialize the depCount/revDeps/nRevDeps fields, and make sure the TOC
	 * items are marked as not being in any parallel-processing list.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		te->depCount = te->nDeps;
		te->revDeps = NULL;
		te->nRevDeps = 0;
		te->par_prev = NULL;
		te->par_next = NULL;
	}

	/*
	 * POST_DATA items that are shown as depending on a table need to be
	 * re-pointed to depend on that table's data, instead. This ensures they
	 * won't get scheduled until the data has been loaded.
	 */
	repoint_table_dependencies(AH);

	/*
	 * Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB
	 * COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only
	 * one BLOB COMMENTS in such files.)
	 */
	if (AH->version < K_VERS_1_11)
	{
		for (te = AH->toc->next; te != AH->toc; te = te->next)
		{
			if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0)
			{
				TocEntry   *te2;

				/* find the BLOBS entry and make the comment depend on it */
				for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next)
				{
					if (strcmp(te2->desc, "BLOBS") == 0)
					{
						te->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
						te->dependencies[0] = te2->dumpId;
						te->nDeps++;
						te->depCount++;
						break;
					}
				}
				break;
			}
		}
	}

	/*
	 * At this point we start to build the revDeps reverse-dependency arrays,
	 * so all changes of dependencies must be complete.
	 */

	/*
	 * Count the incoming dependencies for each item. Also, it is possible
	 * that the dependencies list items that are not in the archive at all
	 * (that should not happen in 9.2 and later, but is highly likely in older
	 * archives). Subtract such items from the depCounts.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		for (i = 0; i < te->nDeps; i++)
		{
			DumpId		depid = te->dependencies[i];

			if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
				AH->tocsByDumpId[depid]->nRevDeps++;
			else
				te->depCount--;	/* dangling dep: will never be satisfied */
		}
	}

	/*
	 * Allocate space for revDeps[] arrays, and reset nRevDeps so we can use
	 * it as a counter below.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if (te->nRevDeps > 0)
			te->revDeps = (DumpId *) pg_malloc(te->nRevDeps * sizeof(DumpId));
		te->nRevDeps = 0;
	}

	/*
	 * Build the revDeps[] arrays of incoming-dependency dumpIds. This had
	 * better agree with the loops above.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		for (i = 0; i < te->nDeps; i++)
		{
			DumpId		depid = te->dependencies[i];

			if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
			{
				TocEntry   *otherte = AH->tocsByDumpId[depid];

				otherte->revDeps[otherte->nRevDeps++] = te->dumpId;
			}
		}
	}

	/*
	 * Lastly, work out the locking dependencies.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		te->lockDeps = NULL;
		te->nLockDeps = 0;
		identify_locking_dependencies(AH, te);
	}
}
|
|
|
|
|
|
|
|
/*
|
2012-05-29 02:38:28 +02:00
|
|
|
* Change dependencies on table items to depend on table data items instead,
|
2009-02-02 21:07:37 +01:00
|
|
|
* but only in POST_DATA items.
|
|
|
|
*/
|
|
|
|
static void
|
2012-05-29 02:38:28 +02:00
|
|
|
repoint_table_dependencies(ArchiveHandle *AH)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
TocEntry *te;
|
2009-06-11 16:49:15 +02:00
|
|
|
int i;
|
2012-05-29 02:38:28 +02:00
|
|
|
DumpId olddep;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
if (te->section != SECTION_POST_DATA)
|
|
|
|
continue;
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
olddep = te->dependencies[i];
|
|
|
|
if (olddep <= AH->maxDumpId &&
|
|
|
|
AH->tableDataId[olddep] != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
te->dependencies[i] = AH->tableDataId[olddep];
|
2009-02-02 21:07:37 +01:00
|
|
|
ahlog(AH, 2, "transferring dependency %d -> %d to %d\n",
|
2012-05-29 02:38:28 +02:00
|
|
|
te->dumpId, olddep, AH->tableDataId[olddep]);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Identify which objects we'll need exclusive lock on in order to restore
|
|
|
|
* the given TOC entry (*other* than the one identified by the TOC entry
|
|
|
|
* itself). Record their dump IDs in the entry's lockDeps[] array.
|
|
|
|
*/
|
|
|
|
static void
|
2012-05-29 02:38:28 +02:00
|
|
|
identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
DumpId *lockids;
|
|
|
|
int nlockids;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Quick exit if no dependencies at all */
|
|
|
|
if (te->nDeps == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Exit if this entry doesn't need exclusive lock on other objects */
|
|
|
|
if (!(strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "RULE") == 0 ||
|
|
|
|
strcmp(te->desc, "TRIGGER") == 0))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
2009-03-13 23:50:44 +01:00
|
|
|
* We assume the item requires exclusive lock on each TABLE DATA item
|
2009-06-11 16:49:15 +02:00
|
|
|
* listed among its dependencies. (This was originally a dependency on
|
|
|
|
* the TABLE, but fix_dependencies repointed it to the data item. Note
|
|
|
|
* that all the entry types we are interested in here are POST_DATA, so
|
|
|
|
* they will all have been changed this way.)
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
2011-11-25 21:40:51 +01:00
|
|
|
lockids = (DumpId *) pg_malloc(te->nDeps * sizeof(DumpId));
|
2009-02-02 21:07:37 +01:00
|
|
|
nlockids = 0;
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
{
|
2009-06-11 16:49:15 +02:00
|
|
|
DumpId depid = te->dependencies[i];
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL &&
|
|
|
|
strcmp(AH->tocsByDumpId[depid]->desc, "TABLE DATA") == 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
lockids[nlockids++] = depid;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nlockids == 0)
|
|
|
|
{
|
|
|
|
free(lockids);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-11-30 02:41:06 +01:00
|
|
|
te->lockDeps = pg_realloc(lockids, nlockids * sizeof(DumpId));
|
2009-02-02 21:07:37 +01:00
|
|
|
te->nLockDeps = nlockids;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove the specified TOC entry from the depCounts of items that depend on
|
2009-08-08 00:48:34 +02:00
|
|
|
* it, thereby possibly making them ready-to-run. Any pending item that
|
|
|
|
* becomes ready should be moved to the ready list.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
static void
|
2009-08-08 00:48:34 +02:00
|
|
|
reduce_dependencies(ArchiveHandle *AH, TocEntry *te, TocEntry *ready_list)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2009-06-11 16:49:15 +02:00
|
|
|
int i;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2010-12-09 19:03:11 +01:00
|
|
|
ahlog(AH, 2, "reducing dependencies for %d\n", te->dumpId);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2010-12-09 19:03:11 +01:00
|
|
|
for (i = 0; i < te->nRevDeps; i++)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
TocEntry *otherte = AH->tocsByDumpId[te->revDeps[i]];
|
2010-12-09 19:03:11 +01:00
|
|
|
|
|
|
|
otherte->depCount--;
|
|
|
|
if (otherte->depCount == 0 && otherte->par_prev != NULL)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2010-12-09 19:03:11 +01:00
|
|
|
/* It must be in the pending list, so remove it ... */
|
|
|
|
par_list_remove(otherte);
|
|
|
|
/* ... and add to ready_list */
|
|
|
|
par_list_append(ready_list, otherte);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the created flag on the DATA member corresponding to the given
|
|
|
|
* TABLE member
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
mark_create_done(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
if (AH->tableDataId[te->dumpId] != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
|
|
|
|
|
|
|
|
ted->created = true;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark the DATA member corresponding to the given TABLE member
|
|
|
|
* as not wanted
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
ahlog(AH, 1, "table \"%s\" could not be created, will not restore its data\n",
|
|
|
|
te->tag);
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
if (AH->tableDataId[te->dumpId] != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
|
|
|
|
|
|
|
|
ted->reqs = 0;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Clone and de-clone routines used in parallel restoration.
 *
 * Enough of the structure is cloned to ensure that there is no
 * conflict between different threads each with their own clone.
 *
 * Returns a freshly pg_malloc'd ArchiveHandle with its own database
 * connection; release it with DeCloneArchive once the worker is done.
 */
ArchiveHandle *
CloneArchive(ArchiveHandle *AH)
{
	ArchiveHandle *clone;

	/* Make a "flat" copy */
	clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle));
	memcpy(clone, AH, sizeof(ArchiveHandle));

	/* Handle format-independent fields */
	/* zeroed so the clone builds its own SQL-parse state from scratch */
	memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse));

	/* The clone will have its own connection, so disregard connection state */
	clone->connection = NULL;
	clone->currUser = NULL;
	clone->currSchema = NULL;
	clone->currTablespace = NULL;
	clone->currWithOids = -1;

	/* savedPassword must be local in case we change it while connecting */
	if (clone->savedPassword)
		clone->savedPassword = pg_strdup(clone->savedPassword);

	/* clone has its own error count, too */
	clone->public.n_errors = 0;

	/*
	 * Connect our new clone object to the database: In parallel restore the
	 * parent is already disconnected, because we can connect the worker
	 * processes independently to the database (no snapshot sync required). In
	 * parallel backup we clone the parent's existing connection.
	 */
	if (AH->mode == archModeRead)
	{
		/* parallel restore: build a brand-new connection from RestoreOptions */
		RestoreOptions *ropt = AH->ropt;

		Assert(AH->connection == NULL);
		/* this also sets clone->connection */
		ConnectDatabase((Archive *) clone, ropt->dbname,
						ropt->pghost, ropt->pgport, ropt->username,
						ropt->promptPassword);
	}
	else
	{
		/* parallel backup: duplicate the parent's connection parameters */
		char	   *dbname;
		char	   *pghost;
		char	   *pgport;
		char	   *username;
		const char *encname;

		Assert(AH->connection != NULL);

		/*
		 * Even though we are technically accessing the parent's database
		 * object here, these functions are fine to be called like that
		 * because all just return a pointer and do not actually send/receive
		 * any data to/from the database.
		 */
		dbname = PQdb(AH->connection);
		pghost = PQhost(AH->connection);
		pgport = PQport(AH->connection);
		username = PQuser(AH->connection);
		encname = pg_encoding_to_char(AH->public.encoding);

		/* this also sets clone->connection */
		/* TRI_NO: never prompt for a password in a worker process */
		ConnectDatabase((Archive *) clone, dbname, pghost, pgport, username, TRI_NO);

		/*
		 * Set the same encoding, whatever we set here is what we got from
		 * pg_encoding_to_char(), so we really shouldn't run into an error
		 * setting that very same value. Also see the comment in
		 * SetupConnection().
		 */
		PQsetClientEncoding(clone->connection, encname);
	}

	/* Let the format-specific code have a chance too */
	(clone->ClonePtr) (clone);

	/* Both branches above must have established a live connection */
	Assert(clone->connection != NULL);
	return clone;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release clone-local storage.
|
|
|
|
*
|
|
|
|
* Note: we assume any clone-local connection was already closed.
|
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
void
|
2009-02-02 21:07:37 +01:00
|
|
|
DeCloneArchive(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
/* Clear format-specific state */
|
|
|
|
(AH->DeClonePtr) (AH);
|
|
|
|
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
/* Clear state allocated by CloneArchive */
|
|
|
|
if (AH->sqlparse.curCmd)
|
|
|
|
destroyPQExpBuffer(AH->sqlparse.curCmd);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* Clear any connection-local state */
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
|
|
|
if (AH->savedPassword)
|
|
|
|
free(AH->savedPassword);
|
|
|
|
|
|
|
|
free(AH);
|
|
|
|
}
|