/*-------------------------------------------------------------------------
 *
 * pg_backup_archiver.c
 *
 *	Private implementation of the archiver routines.
 *
 *	See the headers to pg_restore for more details.
 *
 *	Copyright (c) 2000, Philip Warner
 *	Rights are granted to use this software in any way so long
 *	as this notice is not removed.
 *
 *	The author is not responsible for loss or damages that may
 *	result from its use.
 *
 *
 * IDENTIFICATION
 *		src/bin/pg_dump/pg_backup_archiver.c
 *
 *-------------------------------------------------------------------------
 */
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "postgres_fe.h"
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2002-02-11 01:18:20 +01:00
|
|
|
#include <ctype.h>
|
2013-03-24 16:27:20 +01:00
|
|
|
#include <fcntl.h>
|
2002-08-27 20:57:26 +02:00
|
|
|
#include <unistd.h>
|
2011-01-23 22:10:15 +01:00
|
|
|
#include <sys/stat.h>
|
2009-02-02 21:07:37 +01:00
|
|
|
#include <sys/wait.h>
|
2005-01-26 20:44:43 +01:00
|
|
|
#ifdef WIN32
|
|
|
|
#include <io.h>
|
|
|
|
#endif
|
|
|
|
|
2020-09-22 22:03:32 +02:00
|
|
|
#include "common/string.h"
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "dumputils.h"
|
|
|
|
#include "fe_utils/string_utils.h"
|
2020-09-22 22:03:32 +02:00
|
|
|
#include "lib/stringinfo.h"
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "libpq/libpq-fs.h"
|
2017-02-25 22:12:24 +01:00
|
|
|
#include "parallel.h"
|
|
|
|
#include "pg_backup_archiver.h"
|
|
|
|
#include "pg_backup_db.h"
|
|
|
|
#include "pg_backup_utils.h"
|
2002-08-18 11:36:26 +02:00
|
|
|
|
2012-01-03 22:02:49 +01:00
|
|
|
#define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
|
|
|
|
#define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
|
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
/* state needed to save/restore an archive's output target */
typedef struct _outputContext
{
	void	   *OF;				/* current output handle: plain FILE* or a
								 * compressed (gz) stream */
	int			gzOut;			/* nonzero if OF is a compressed stream and
								 * must be closed with GZCLOSE */
} OutputContext;
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/*
 * State for tracking TocEntrys that are ready to process during a parallel
 * restore.  (This used to be a list, and we still call it that, though now
 * it's really an array so that we can apply qsort to it.)
 *
 * tes[] is sized large enough that we can't overrun it.
 * The valid entries are indexed first_te .. last_te inclusive.
 * We periodically sort the array to bring larger-by-dataLength entries to
 * the front; "sorted" is true if the valid entries are known sorted.
 * (NOTE(review): sorting presumably happens via ready_list_sort() using
 * TocEntrySizeCompare(), per the prototypes above — confirm in their
 * definitions.)
 */
typedef struct _parallelReadyList
{
	TocEntry  **tes;			/* Ready-to-dump TocEntrys */
	int			first_te;		/* index of first valid entry in tes[] */
	int			last_te;		/* index of last valid entry in tes[] */
	bool		sorted;			/* are valid entries currently sorted? */
} ParallelReadyList;
|
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
|
2017-03-22 15:00:30 +01:00
|
|
|
const int compression, bool dosync, ArchiveMode mode,
|
2016-08-30 18:00:00 +02:00
|
|
|
SetupWorkerPtrType setupWorkerPtr);
|
2020-08-25 07:24:15 +02:00
|
|
|
static void _getObjectDescription(PQExpBuffer buf, TocEntry *te);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData);
|
2019-02-01 15:29:42 +01:00
|
|
|
static char *sanitize_line(const char *str, bool want_hyphen);
|
2004-02-24 04:35:19 +01:00
|
|
|
static void _doSetFixedOutputState(ArchiveHandle *AH);
|
2002-08-18 11:36:26 +02:00
|
|
|
static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
|
2004-09-10 22:05:18 +02:00
|
|
|
static void _reconnectToDB(ArchiveHandle *AH, const char *dbname);
|
2003-09-24 00:48:53 +02:00
|
|
|
static void _becomeUser(ArchiveHandle *AH, const char *user);
|
|
|
|
static void _becomeOwner(ArchiveHandle *AH, TocEntry *te);
|
2002-05-11 00:36:27 +02:00
|
|
|
static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName);
|
2004-11-06 20:36:02 +01:00
|
|
|
static void _selectTablespace(ArchiveHandle *AH, const char *tablespace);
|
2019-03-06 18:54:38 +01:00
|
|
|
static void _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam);
|
2006-05-28 23:13:54 +02:00
|
|
|
static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void processStdStringsEntry(ArchiveHandle *AH, TocEntry *te);
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
static void processSearchPathEntry(ArchiveHandle *AH, TocEntry *te);
|
2020-12-11 19:15:30 +01:00
|
|
|
static int _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
static RestorePass _tocEntryRestorePass(TocEntry *te);
|
2010-02-18 02:29:10 +01:00
|
|
|
static bool _tocEntryIsACL(TocEntry *te);
|
2016-01-13 23:48:33 +01:00
|
|
|
static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te);
|
2012-05-29 02:38:28 +02:00
|
|
|
static void buildTocEntryArrays(ArchiveHandle *AH);
|
2020-08-25 07:24:15 +02:00
|
|
|
static void _moveBefore(TocEntry *pos, TocEntry *te);
|
2000-07-04 16:25:28 +02:00
|
|
|
static int _discoverArchiveFormat(ArchiveHandle *AH);
|
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
static int RestoringToDB(ArchiveHandle *AH);
|
2005-06-21 22:45:44 +02:00
|
|
|
static void dump_lo_buf(ArchiveHandle *AH);
|
2005-04-15 18:40:36 +02:00
|
|
|
static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
|
2012-02-07 22:20:29 +01:00
|
|
|
static void SetOutput(ArchiveHandle *AH, const char *filename, int compression);
|
2011-01-22 23:56:42 +01:00
|
|
|
static OutputContext SaveOutput(ArchiveHandle *AH);
|
|
|
|
static void RestoreOutput(ArchiveHandle *AH, OutputContext savedContext);
|
2005-04-15 18:40:36 +02:00
|
|
|
|
2016-01-13 23:48:33 +01:00
|
|
|
static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
static void restore_toc_entries_prefork(ArchiveHandle *AH,
|
|
|
|
TocEntry *pending_list);
|
|
|
|
static void restore_toc_entries_parallel(ArchiveHandle *AH,
|
|
|
|
ParallelState *pstate,
|
|
|
|
TocEntry *pending_list);
|
|
|
|
static void restore_toc_entries_postfork(ArchiveHandle *AH,
|
2013-03-24 16:27:20 +01:00
|
|
|
TocEntry *pending_list);
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
static void pending_list_header_init(TocEntry *l);
|
|
|
|
static void pending_list_append(TocEntry *l, TocEntry *te);
|
|
|
|
static void pending_list_remove(TocEntry *te);
|
|
|
|
static void ready_list_init(ParallelReadyList *ready_list, int tocCount);
|
|
|
|
static void ready_list_free(ParallelReadyList *ready_list);
|
|
|
|
static void ready_list_insert(ParallelReadyList *ready_list, TocEntry *te);
|
|
|
|
static void ready_list_remove(ParallelReadyList *ready_list, int i);
|
|
|
|
static void ready_list_sort(ParallelReadyList *ready_list);
|
|
|
|
static int TocEntrySizeCompare(const void *p1, const void *p2);
|
|
|
|
static void move_to_ready_list(TocEntry *pending_list,
|
|
|
|
ParallelReadyList *ready_list,
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
RestorePass pass);
|
2020-08-25 07:24:15 +02:00
|
|
|
static TocEntry *pop_next_work_item(ParallelReadyList *ready_list,
|
2013-03-24 16:27:20 +01:00
|
|
|
ParallelState *pstate);
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
static void mark_dump_job_done(ArchiveHandle *AH,
|
|
|
|
TocEntry *te,
|
|
|
|
int status,
|
|
|
|
void *callback_data);
|
|
|
|
static void mark_restore_job_done(ArchiveHandle *AH,
|
|
|
|
TocEntry *te,
|
|
|
|
int status,
|
|
|
|
void *callback_data);
|
2009-02-02 21:07:37 +01:00
|
|
|
static void fix_dependencies(ArchiveHandle *AH);
|
2009-04-12 23:02:44 +02:00
|
|
|
static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2);
|
2012-05-29 02:38:28 +02:00
|
|
|
static void repoint_table_dependencies(ArchiveHandle *AH);
|
|
|
|
static void identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te);
|
2009-08-08 00:48:34 +02:00
|
|
|
static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ParallelReadyList *ready_list);
|
2009-02-02 21:07:37 +01:00
|
|
|
static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te);
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2015-09-14 15:19:49 +02:00
|
|
|
static void StrictNamesCheck(RestoreOptions *ropt);
|
|
|
|
|
|
|
|
|
2014-10-14 20:00:55 +02:00
|
|
|
/*
|
2015-01-11 19:28:26 +01:00
|
|
|
* Allocate a new DumpOptions block containing all default values.
|
2014-10-14 20:00:55 +02:00
|
|
|
*/
|
|
|
|
DumpOptions *
|
|
|
|
NewDumpOptions(void)
|
|
|
|
{
|
2015-01-11 19:28:26 +01:00
|
|
|
DumpOptions *opts = (DumpOptions *) pg_malloc(sizeof(DumpOptions));
|
2014-10-14 20:00:55 +02:00
|
|
|
|
2015-01-11 19:28:26 +01:00
|
|
|
InitDumpOptions(opts);
|
|
|
|
return opts;
|
|
|
|
}
|
2014-10-14 20:00:55 +02:00
|
|
|
|
2015-01-11 19:28:26 +01:00
|
|
|
/*
|
|
|
|
* Initialize a DumpOptions struct to all default values
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
InitDumpOptions(DumpOptions *opts)
|
|
|
|
{
|
|
|
|
memset(opts, 0, sizeof(DumpOptions));
|
2014-10-14 20:00:55 +02:00
|
|
|
/* set any fields that shouldn't default to zeroes */
|
|
|
|
opts->include_everything = true;
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
opts->cparams.promptPassword = TRI_DEFAULT;
|
2014-10-14 20:00:55 +02:00
|
|
|
opts->dumpSections = DUMP_UNSECTIONED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a freshly allocated DumpOptions with options equivalent to those
|
|
|
|
* found in the given RestoreOptions.
|
|
|
|
*/
|
|
|
|
DumpOptions *
|
|
|
|
dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
|
|
|
|
{
|
|
|
|
DumpOptions *dopt = NewDumpOptions();
|
|
|
|
|
|
|
|
/* this is the inverse of what's at the end of pg_dump.c's main() */
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
dopt->cparams.dbname = ropt->cparams.dbname ? pg_strdup(ropt->cparams.dbname) : NULL;
|
|
|
|
dopt->cparams.pgport = ropt->cparams.pgport ? pg_strdup(ropt->cparams.pgport) : NULL;
|
|
|
|
dopt->cparams.pghost = ropt->cparams.pghost ? pg_strdup(ropt->cparams.pghost) : NULL;
|
|
|
|
dopt->cparams.username = ropt->cparams.username ? pg_strdup(ropt->cparams.username) : NULL;
|
|
|
|
dopt->cparams.promptPassword = ropt->cparams.promptPassword;
|
2014-10-14 20:00:55 +02:00
|
|
|
dopt->outputClean = ropt->dropSchema;
|
|
|
|
dopt->dataOnly = ropt->dataOnly;
|
|
|
|
dopt->schemaOnly = ropt->schemaOnly;
|
|
|
|
dopt->if_exists = ropt->if_exists;
|
|
|
|
dopt->column_inserts = ropt->column_inserts;
|
|
|
|
dopt->dumpSections = ropt->dumpSections;
|
|
|
|
dopt->aclsSkip = ropt->aclsSkip;
|
|
|
|
dopt->outputSuperuser = ropt->superuser;
|
|
|
|
dopt->outputCreateDB = ropt->createDB;
|
|
|
|
dopt->outputNoOwner = ropt->noOwner;
|
|
|
|
dopt->outputNoTablespaces = ropt->noTablespace;
|
|
|
|
dopt->disable_triggers = ropt->disable_triggers;
|
|
|
|
dopt->use_setsessauth = ropt->use_setsessauth;
|
|
|
|
dopt->disable_dollar_quoting = ropt->disable_dollar_quoting;
|
|
|
|
dopt->dump_inserts = ropt->dump_inserts;
|
Support --no-comments in pg_dump, pg_dumpall, pg_restore.
We have switches already to suppress other subsidiary object properties,
such as ACLs, security labels, ownership, and tablespaces, so just on
the grounds of symmetry we should allow suppressing comments as well.
Also, commit 0d4e6ed30 added a positive reason to have this feature,
i.e. to allow obtaining the old behavior of selective pg_restore should
anyone desire that.
Recent commits have removed the cases where pg_dump emitted comments on
built-in objects that the restoring user might not have privileges to
comment on, so the original primary motivation for this feature is gone,
but it still seems at least somewhat useful in its own right.
Robins Tharakan, reviewed by Fabrízio Mello
Discussion: https://postgr.es/m/CAEP4nAx22Z4ch74oJGzr5RyyjcyUSbpiFLyeYXX8pehfou92ug@mail.gmail.com
2018-01-25 21:27:24 +01:00
|
|
|
dopt->no_comments = ropt->no_comments;
|
2017-05-12 15:15:40 +02:00
|
|
|
dopt->no_publications = ropt->no_publications;
|
2014-10-14 20:00:55 +02:00
|
|
|
dopt->no_security_labels = ropt->no_security_labels;
|
2017-05-09 16:58:06 +02:00
|
|
|
dopt->no_subscriptions = ropt->no_subscriptions;
|
2014-10-14 20:00:55 +02:00
|
|
|
dopt->lockWaitTimeout = ropt->lockWaitTimeout;
|
|
|
|
dopt->include_everything = ropt->include_everything;
|
|
|
|
dopt->enable_row_security = ropt->enable_row_security;
|
2016-08-23 18:00:00 +02:00
|
|
|
dopt->sequence_data = ropt->sequence_data;
|
2014-10-14 20:00:55 +02:00
|
|
|
|
|
|
|
return dopt;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/*
|
|
|
|
* Wrapper functions.
|
|
|
|
*
|
2020-06-12 14:05:10 +02:00
|
|
|
* The objective is to make writing new formats and dumpers as simple
|
2000-07-04 16:25:28 +02:00
|
|
|
* as possible, if necessary at the expense of extra function calls etc.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
|
|
|
|
* The dump worker setup needs lots of knowledge of the internals of pg_dump,
|
2020-06-12 14:05:10 +02:00
|
|
|
* so it's defined in pg_dump.c and passed into OpenArchive. The restore worker
|
2013-03-24 16:27:20 +01:00
|
|
|
* setup doesn't need to know anything much, so it's defined here.
|
|
|
|
*/
|
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
setupRestoreWorker(Archive *AHX)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ReopenPtr(AH);
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
/* Create a new archive */
|
|
|
|
/* Public */
|
2000-07-21 13:40:08 +02:00
|
|
|
Archive *
|
|
|
|
CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
|
2017-03-22 15:00:30 +01:00
|
|
|
const int compression, bool dosync, ArchiveMode mode,
|
2016-08-30 18:00:00 +02:00
|
|
|
SetupWorkerPtrType setupDumpWorker)
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2017-03-22 15:00:30 +01:00
|
|
|
ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression, dosync,
|
|
|
|
mode, setupDumpWorker);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
return (Archive *) AH;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Open an existing archive */
|
|
|
|
/* Public */
|
2000-07-21 13:40:08 +02:00
|
|
|
Archive *
|
|
|
|
OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2017-03-22 15:00:30 +01:00
|
|
|
ArchiveHandle *AH = _allocAH(FileSpec, fmt, 0, true, archModeRead, setupRestoreWorker);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
return (Archive *) AH;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
|
|
|
void
|
2016-01-13 23:48:33 +01:00
|
|
|
CloseArchive(Archive *AHX)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-01-12 05:32:07 +01:00
|
|
|
int res = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ClosePtr(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
/* Close the output */
|
|
|
|
if (AH->gzOut)
|
2001-01-12 05:32:07 +01:00
|
|
|
res = GZCLOSE(AH->OF);
|
2000-07-04 16:25:28 +02:00
|
|
|
else if (AH->OF != stdout)
|
2001-01-12 05:32:07 +01:00
|
|
|
res = fclose(AH->OF);
|
|
|
|
|
|
|
|
if (res != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not close output file: %m");
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
|
|
|
void
|
2016-01-13 23:48:33 +01:00
|
|
|
SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
/* Caller can omit dump options, in which case we synthesize them */
|
|
|
|
if (dopt == NULL && ropt != NULL)
|
|
|
|
dopt = dumpOptionsFromRestoreOptions(ropt);
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
|
|
|
|
/* Save options for later access */
|
2016-01-13 23:48:33 +01:00
|
|
|
AH->dopt = dopt;
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
AH->ropt = ropt;
|
2016-01-13 23:48:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
|
|
|
void
|
|
|
|
ProcessArchiveRestoreOptions(Archive *AHX)
|
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
TocEntry *te;
|
|
|
|
teSection curSection;
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
|
|
|
|
/* Decide which TOC entries will be dumped/restored, and mark them */
|
|
|
|
curSection = SECTION_PRE_DATA;
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
Improve pg_dump's dependency-sorting logic to enforce section dump order.
As of 9.2, with the --section option, it is very important that the concept
of "pre data", "data", and "post data" sections of the output be honored
strictly; else a dump divided into separate sectional files might be
unrestorable. However, the dependency-sorting logic knew nothing of
sections and would happily select output orderings that didn't fit that
structure. Doing so was mostly harmless before 9.2, but now we need to be
sure it doesn't do that. To fix, create dummy objects representing the
section boundaries and add dependencies between them and all the normal
objects. (This might sound expensive but it seems to only add a percent or
two to pg_dump's runtime.)
This also fixes a problem introduced in 9.1 by the feature that allows
incomplete GROUP BY lists when a primary key is given in GROUP BY.
That means that views can depend on primary key constraints. Previously,
pg_dump would deal with that by simply emitting the primary key constraint
before the view definition (and hence before the data section of the
output). That's bad enough for simple serial restores, where creating an
index before the data is loaded works, but is undesirable for speed
reasons. But it could lead to outright failure of parallel restores, as
seen in bug #6699 from Joe Van Dyk. That happened because pg_restore would
switch into parallel mode as soon as it reached the constraint, and then
very possibly would try to emit the view definition before the primary key
was committed (as a consequence of another bug that causes the view not to
be correctly marked as depending on the constraint). Adding the section
boundary constraints forces the dependency-sorting code to break the view
into separate table and rule declarations, allowing the rule, and hence the
primary key constraint it depends on, to revert to their intended location
in the post-data section. This also somewhat accidentally works around the
bogus-dependency-marking problem, because the rule will be correctly shown
as depending on the constraint, so parallel pg_restore will now do the
right thing. (We will fix the bogus-dependency problem for real in a
separate patch, but that patch is not easily back-portable to 9.1, so the
fact that this patch is enough to dodge the only known symptom is
fortunate.)
Back-patch to 9.1, except for the hunk that adds verification that the
finished archive TOC list is in correct section order; the place where
it was convenient to add that doesn't exist in 9.1.
2012-06-26 03:19:10 +02:00
|
|
|
/*
|
|
|
|
* When writing an archive, we also take this opportunity to check
|
|
|
|
* that we have generated the entries in a sane order that respects
|
|
|
|
* the section divisions. When reading, don't complain, since buggy
|
|
|
|
* old versions of pg_dump might generate out-of-order archives.
|
|
|
|
*/
|
|
|
|
if (AH->mode != archModeRead)
|
|
|
|
{
|
|
|
|
switch (te->section)
|
|
|
|
{
|
|
|
|
case SECTION_NONE:
|
|
|
|
/* ok to be anywhere */
|
|
|
|
break;
|
|
|
|
case SECTION_PRE_DATA:
|
|
|
|
if (curSection != SECTION_PRE_DATA)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("archive items not in correct section order");
|
Improve pg_dump's dependency-sorting logic to enforce section dump order.
As of 9.2, with the --section option, it is very important that the concept
of "pre data", "data", and "post data" sections of the output be honored
strictly; else a dump divided into separate sectional files might be
unrestorable. However, the dependency-sorting logic knew nothing of
sections and would happily select output orderings that didn't fit that
structure. Doing so was mostly harmless before 9.2, but now we need to be
sure it doesn't do that. To fix, create dummy objects representing the
section boundaries and add dependencies between them and all the normal
objects. (This might sound expensive but it seems to only add a percent or
two to pg_dump's runtime.)
This also fixes a problem introduced in 9.1 by the feature that allows
incomplete GROUP BY lists when a primary key is given in GROUP BY.
That means that views can depend on primary key constraints. Previously,
pg_dump would deal with that by simply emitting the primary key constraint
before the view definition (and hence before the data section of the
output). That's bad enough for simple serial restores, where creating an
index before the data is loaded works, but is undesirable for speed
reasons. But it could lead to outright failure of parallel restores, as
seen in bug #6699 from Joe Van Dyk. That happened because pg_restore would
switch into parallel mode as soon as it reached the constraint, and then
very possibly would try to emit the view definition before the primary key
was committed (as a consequence of another bug that causes the view not to
be correctly marked as depending on the constraint). Adding the section
boundary constraints forces the dependency-sorting code to break the view
into separate table and rule declarations, allowing the rule, and hence the
primary key constraint it depends on, to revert to their intended location
in the post-data section. This also somewhat accidentally works around the
bogus-dependency-marking problem, because the rule will be correctly shown
as depending on the constraint, so parallel pg_restore will now do the
right thing. (We will fix the bogus-dependency problem for real in a
separate patch, but that patch is not easily back-portable to 9.1, so the
fact that this patch is enough to dodge the only known symptom is
fortunate.)
Back-patch to 9.1, except for the hunk that adds verification that the
finished archive TOC list is in correct section order; the place where
it was convenient to add that doesn't exist in 9.1.
2012-06-26 03:19:10 +02:00
|
|
|
break;
|
|
|
|
case SECTION_DATA:
|
|
|
|
if (curSection == SECTION_POST_DATA)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("archive items not in correct section order");
|
Improve pg_dump's dependency-sorting logic to enforce section dump order.
As of 9.2, with the --section option, it is very important that the concept
of "pre data", "data", and "post data" sections of the output be honored
strictly; else a dump divided into separate sectional files might be
unrestorable. However, the dependency-sorting logic knew nothing of
sections and would happily select output orderings that didn't fit that
structure. Doing so was mostly harmless before 9.2, but now we need to be
sure it doesn't do that. To fix, create dummy objects representing the
section boundaries and add dependencies between them and all the normal
objects. (This might sound expensive but it seems to only add a percent or
two to pg_dump's runtime.)
This also fixes a problem introduced in 9.1 by the feature that allows
incomplete GROUP BY lists when a primary key is given in GROUP BY.
That means that views can depend on primary key constraints. Previously,
pg_dump would deal with that by simply emitting the primary key constraint
before the view definition (and hence before the data section of the
output). That's bad enough for simple serial restores, where creating an
index before the data is loaded works, but is undesirable for speed
reasons. But it could lead to outright failure of parallel restores, as
seen in bug #6699 from Joe Van Dyk. That happened because pg_restore would
switch into parallel mode as soon as it reached the constraint, and then
very possibly would try to emit the view definition before the primary key
was committed (as a consequence of another bug that causes the view not to
be correctly marked as depending on the constraint). Adding the section
boundary constraints forces the dependency-sorting code to break the view
into separate table and rule declarations, allowing the rule, and hence the
primary key constraint it depends on, to revert to their intended location
in the post-data section. This also somewhat accidentally works around the
bogus-dependency-marking problem, because the rule will be correctly shown
as depending on the constraint, so parallel pg_restore will now do the
right thing. (We will fix the bogus-dependency problem for real in a
separate patch, but that patch is not easily back-portable to 9.1, so the
fact that this patch is enough to dodge the only known symptom is
fortunate.)
Back-patch to 9.1, except for the hunk that adds verification that the
finished archive TOC list is in correct section order; the place where
it was convenient to add that doesn't exist in 9.1.
2012-06-26 03:19:10 +02:00
|
|
|
break;
|
|
|
|
case SECTION_POST_DATA:
|
|
|
|
/* ok no matter which section we were in */
|
|
|
|
break;
|
|
|
|
default:
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("unexpected section code %d",
|
Improve pg_dump's dependency-sorting logic to enforce section dump order.
As of 9.2, with the --section option, it is very important that the concept
of "pre data", "data", and "post data" sections of the output be honored
strictly; else a dump divided into separate sectional files might be
unrestorable. However, the dependency-sorting logic knew nothing of
sections and would happily select output orderings that didn't fit that
structure. Doing so was mostly harmless before 9.2, but now we need to be
sure it doesn't do that. To fix, create dummy objects representing the
section boundaries and add dependencies between them and all the normal
objects. (This might sound expensive but it seems to only add a percent or
two to pg_dump's runtime.)
This also fixes a problem introduced in 9.1 by the feature that allows
incomplete GROUP BY lists when a primary key is given in GROUP BY.
That means that views can depend on primary key constraints. Previously,
pg_dump would deal with that by simply emitting the primary key constraint
before the view definition (and hence before the data section of the
output). That's bad enough for simple serial restores, where creating an
index before the data is loaded works, but is undesirable for speed
reasons. But it could lead to outright failure of parallel restores, as
seen in bug #6699 from Joe Van Dyk. That happened because pg_restore would
switch into parallel mode as soon as it reached the constraint, and then
very possibly would try to emit the view definition before the primary key
was committed (as a consequence of another bug that causes the view not to
be correctly marked as depending on the constraint). Adding the section
boundary constraints forces the dependency-sorting code to break the view
into separate table and rule declarations, allowing the rule, and hence the
primary key constraint it depends on, to revert to their intended location
in the post-data section. This also somewhat accidentally works around the
bogus-dependency-marking problem, because the rule will be correctly shown
as depending on the constraint, so parallel pg_restore will now do the
right thing. (We will fix the bogus-dependency problem for real in a
separate patch, but that patch is not easily back-portable to 9.1, so the
fact that this patch is enough to dodge the only known symptom is
fortunate.)
Back-patch to 9.1, except for the hunk that adds verification that the
finished archive TOC list is in correct section order; the place where
it was convenient to add that doesn't exist in 9.1.
2012-06-26 03:19:10 +02:00
|
|
|
(int) te->section);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (te->section != SECTION_NONE)
|
|
|
|
curSection = te->section;
|
Improve pg_dump's dependency-sorting logic to enforce section dump order.
As of 9.2, with the --section option, it is very important that the concept
of "pre data", "data", and "post data" sections of the output be honored
strictly; else a dump divided into separate sectional files might be
unrestorable. However, the dependency-sorting logic knew nothing of
sections and would happily select output orderings that didn't fit that
structure. Doing so was mostly harmless before 9.2, but now we need to be
sure it doesn't do that. To fix, create dummy objects representing the
section boundaries and add dependencies between them and all the normal
objects. (This might sound expensive but it seems to only add a percent or
two to pg_dump's runtime.)
This also fixes a problem introduced in 9.1 by the feature that allows
incomplete GROUP BY lists when a primary key is given in GROUP BY.
That means that views can depend on primary key constraints. Previously,
pg_dump would deal with that by simply emitting the primary key constraint
before the view definition (and hence before the data section of the
output). That's bad enough for simple serial restores, where creating an
index before the data is loaded works, but is undesirable for speed
reasons. But it could lead to outright failure of parallel restores, as
seen in bug #6699 from Joe Van Dyk. That happened because pg_restore would
switch into parallel mode as soon as it reached the constraint, and then
very possibly would try to emit the view definition before the primary key
was committed (as a consequence of another bug that causes the view not to
be correctly marked as depending on the constraint). Adding the section
boundary constraints forces the dependency-sorting code to break the view
into separate table and rule declarations, allowing the rule, and hence the
primary key constraint it depends on, to revert to their intended location
in the post-data section. This also somewhat accidentally works around the
bogus-dependency-marking problem, because the rule will be correctly shown
as depending on the constraint, so parallel pg_restore will now do the
right thing. (We will fix the bogus-dependency problem for real in a
separate patch, but that patch is not easily back-portable to 9.1, so the
fact that this patch is enough to dodge the only known symptom is
fortunate.)
Back-patch to 9.1, except for the hunk that adds verification that the
finished archive TOC list is in correct section order; the place where
it was convenient to add that doesn't exist in 9.1.
2012-06-26 03:19:10 +02:00
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
te->reqs = _tocEntryRequired(te, curSection, AH);
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
}
|
2015-09-14 15:19:49 +02:00
|
|
|
|
|
|
|
/* Enforce strict names checking */
|
|
|
|
if (ropt->strict_names)
|
|
|
|
StrictNamesCheck(ropt);
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
|
|
|
void
|
|
|
|
RestoreArchive(Archive *AHX)
|
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
2011-08-29 04:27:48 +02:00
|
|
|
bool parallel_mode;
|
2005-04-15 18:40:36 +02:00
|
|
|
TocEntry *te;
|
2000-07-04 16:25:28 +02:00
|
|
|
OutputContext sav;
|
|
|
|
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->stage = STAGE_INITIALIZING;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2011-08-29 04:27:48 +02:00
|
|
|
/*
|
|
|
|
* If we're going to do parallel restore, there are some restrictions.
|
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
|
2011-08-29 04:27:48 +02:00
|
|
|
if (parallel_mode)
|
|
|
|
{
|
|
|
|
/* We haven't got round to making this work for all archive formats */
|
|
|
|
if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("parallel restore is not supported with this archive file format");
|
2011-08-29 04:27:48 +02:00
|
|
|
|
|
|
|
/* Doesn't work if the archive represents dependencies as OIDs */
|
|
|
|
if (AH->version < K_VERS_1_8)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
|
2011-08-29 04:27:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* It's also not gonna work if we can't reopen the input file, so
|
|
|
|
* let's try that immediately.
|
|
|
|
*/
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ReopenPtr(AH);
|
2011-08-29 04:27:48 +02:00
|
|
|
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* Make sure we won't need (de)compression we haven't got
|
|
|
|
*/
|
|
|
|
#ifndef HAVE_LIBZ
|
|
|
|
if (AH->compression != 0 && AH->PrintTocDataPtr != NULL)
|
|
|
|
{
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("cannot restore from compressed archive (compression not supported in this installation)");
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
/*
|
|
|
|
* Prepare index arrays, so we can assume we have them throughout restore.
|
|
|
|
* It's possible we already did this, though.
|
|
|
|
*/
|
|
|
|
if (AH->tocsByDumpId == NULL)
|
|
|
|
buildTocEntryArrays(AH);
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* If we're using a DB connection, then connect it.
|
|
|
|
*/
|
|
|
|
if (ropt->useDB)
|
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("connecting to database for restore");
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version < K_VERS_1_3)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("direct database connections are not supported in pre-1.3 archives");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2013-12-29 22:17:52 +01:00
|
|
|
/*
|
|
|
|
* We don't want to guess at whether the dump will successfully
|
|
|
|
* restore; allow the attempt regardless of the version of the restore
|
|
|
|
* target.
|
|
|
|
*/
|
|
|
|
AHX->minRemoteVersion = 0;
|
2016-10-12 18:19:56 +02:00
|
|
|
AHX->maxRemoteVersion = 9999999;
|
2001-04-25 09:03:20 +02:00
|
|
|
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
ConnectDatabase(AHX, &ropt->cparams, false);
|
2004-08-29 07:07:03 +02:00
|
|
|
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
/*
|
|
|
|
* If we're talking to the DB directly, don't send comments since they
|
|
|
|
* obscure SQL when displaying errors
|
|
|
|
*/
|
|
|
|
AH->noTocComments = 1;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
2001-03-06 05:08:04 +01:00
|
|
|
/*
|
2002-05-11 00:36:27 +02:00
|
|
|
* Work out if we have an implied data-only restore. This can happen if
|
2001-03-06 05:08:04 +01:00
|
|
|
* the dump was data only or if the user has used a toc list to exclude
|
|
|
|
* all of the schema data. All we do is look for schema entries - if none
|
|
|
|
* are found then we set the dataOnly flag.
|
|
|
|
*
|
|
|
|
* We could scan for wanted TABLE entries, but that is not the same as
|
|
|
|
* dataOnly. At this stage, it seems unnecessary (6-Mar-2001).
|
|
|
|
*/
|
|
|
|
if (!ropt->dataOnly)
|
|
|
|
{
|
2005-01-25 23:44:31 +01:00
|
|
|
int impliedDataOnly = 1;
|
|
|
|
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2001-03-06 05:08:04 +01:00
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if ((te->reqs & REQ_SCHEMA) != 0)
|
2001-03-06 05:08:04 +01:00
|
|
|
{ /* It's schema, and it's wanted */
|
|
|
|
impliedDataOnly = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (impliedDataOnly)
|
|
|
|
{
|
|
|
|
ropt->dataOnly = impliedDataOnly;
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("implied data-only restore");
|
2001-03-06 05:08:04 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Setup the output file if necessary.
|
2007-01-25 04:30:43 +01:00
|
|
|
*/
|
2011-01-22 23:56:42 +01:00
|
|
|
sav = SaveOutput(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
if (ropt->filename || ropt->compression)
|
2011-01-22 23:56:42 +01:00
|
|
|
SetOutput(AH, ropt->filename, ropt->compression);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2002-08-18 11:36:26 +02:00
|
|
|
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2014-07-08 01:02:45 +02:00
|
|
|
if (AH->archiveRemoteVersion)
|
|
|
|
ahprintf(AH, "-- Dumped from database version %s\n",
|
|
|
|
AH->archiveRemoteVersion);
|
|
|
|
if (AH->archiveDumpVersion)
|
|
|
|
ahprintf(AH, "-- Dumped by pg_dump version %s\n",
|
|
|
|
AH->archiveDumpVersion);
|
|
|
|
|
|
|
|
ahprintf(AH, "\n");
|
|
|
|
|
2010-02-23 22:48:32 +01:00
|
|
|
if (AH->public.verbose)
|
2005-04-15 18:40:36 +02:00
|
|
|
dumpTimestamp(AH, "Started on", AH->createDate);
|
|
|
|
|
2006-02-13 22:30:19 +01:00
|
|
|
if (ropt->single_txn)
|
2006-02-15 00:30:43 +01:00
|
|
|
{
|
|
|
|
if (AH->connection)
|
2014-10-14 20:00:55 +02:00
|
|
|
StartTransaction(AHX);
|
2006-02-15 00:30:43 +01:00
|
|
|
else
|
|
|
|
ahprintf(AH, "BEGIN;\n\n");
|
|
|
|
}
|
2006-02-13 22:30:19 +01:00
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/*
|
|
|
|
* Establish important parameter values right away.
|
|
|
|
*/
|
|
|
|
_doSetFixedOutputState(AH);
|
|
|
|
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->stage = STAGE_PROCESSING;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Drop the items at the start, in reverse order
|
|
|
|
*/
|
2000-07-04 16:25:28 +02:00
|
|
|
if (ropt->dropSchema)
|
|
|
|
{
|
2005-04-15 18:40:36 +02:00
|
|
|
for (te = AH->toc->prev; te != AH->toc; te = te->prev)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2005-04-15 18:40:36 +02:00
|
|
|
AH->currentTE = te;
|
|
|
|
|
2012-10-20 22:58:32 +02:00
|
|
|
/*
|
|
|
|
* In createDB mode, issue a DROP *only* for the database as a
|
|
|
|
* whole. Issuing drops against anything else would be wrong,
|
|
|
|
* because at this point we're connected to the wrong database.
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
* (The DATABASE PROPERTIES entry, if any, should be treated like
|
|
|
|
* the DATABASE entry.)
|
2012-10-20 22:58:32 +02:00
|
|
|
*/
|
|
|
|
if (ropt->createDB)
|
|
|
|
{
|
Move handling of database properties from pg_dumpall into pg_dump.
This patch rearranges the division of labor between pg_dump and pg_dumpall
so that pg_dump itself handles all properties attached to a single
database. Notably, a database's ACL (GRANT/REVOKE status) and local GUC
settings established by ALTER DATABASE SET and ALTER ROLE IN DATABASE SET
can be dumped and restored by pg_dump. This is a long-requested
improvement.
"pg_dumpall -g" will now produce only role- and tablespace-related output,
nothing about individual databases. The total output of a regular
pg_dumpall run remains the same.
pg_dump (or pg_restore) will restore database-level properties only when
creating the target database with --create. This applies not only to
ACLs and GUCs but to the other database properties it already handled,
that is database comments and security labels. This is more consistent
and useful, but does represent an incompatibility in the behavior seen
without --create.
(This change makes the proposed patch to have pg_dump use "COMMENT ON
DATABASE CURRENT_DATABASE" unnecessary, since there is no case where
the command is issued that we won't know the true name of the database.
We might still want that patch as a feature in its own right, but pg_dump
no longer needs it.)
pg_dumpall with --clean will now drop and recreate the "postgres" and
"template1" databases in the target cluster, allowing their locale and
encoding settings to be changed if necessary, and providing a cleaner
way to set nondefault tablespaces for them than we had before. This
means that such a script must now always be started in the "postgres"
database; the order of drops and reconnects will not work otherwise.
Without --clean, the script will not adjust any database-level properties
of those two databases (including their comments, ACLs, and security
labels, which it formerly would try to set).
Another minor incompatibility is that the CREATE DATABASE commands in a
pg_dumpall script will now always specify locale and encoding settings.
Formerly those would be omitted if they matched the cluster's default.
While that behavior had some usefulness in some migration scenarios,
it also posed a significant hazard of unwanted locale/encoding changes.
To migrate to another locale/encoding, it's now necessary to use pg_dump
without --create to restore into a database with the desired settings.
Commit 4bd371f6f's hack to emit "SET default_transaction_read_only = off"
is gone: we now dodge that problem by the expedient of not issuing ALTER
DATABASE SET commands until after reconnecting to the target database.
Therefore, such settings won't apply during the restore session.
In passing, improve some shaky grammar in the docs, and add a note pointing
out that pg_dumpall's output can't be expected to load without any errors.
(Someday we might want to fix that, but this is not that patch.)
Haribabu Kommi, reviewed at various times by Andreas Karlsson,
Vaishnavi Prabakaran, and Robert Haas; further hacking by me.
Discussion: https://postgr.es/m/CAJrrPGcUurV0eWTeXODwsOYFN=Ekq36t1s0YnFYUNzsmRfdAyA@mail.gmail.com
2018-01-22 20:09:09 +01:00
|
|
|
if (strcmp(te->desc, "DATABASE") != 0 &&
|
|
|
|
strcmp(te->desc, "DATABASE PROPERTIES") != 0)
|
2012-10-20 22:58:32 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Otherwise, drop anything that's selected and has a dropStmt */
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
|
2001-07-03 22:21:50 +02:00
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("dropping %s %s", te->desc, te->tag);
|
2002-05-11 00:36:27 +02:00
|
|
|
/* Select owner and schema as necessary */
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeOwner(AH, te);
|
2002-05-11 00:36:27 +02:00
|
|
|
_selectOutputSchema(AH, te->namespace);
|
2014-03-03 19:02:18 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now emit the DROP command, if the object has one. Note we
|
|
|
|
* don't necessarily emit it verbatim; at this point we add an
|
|
|
|
* appropriate IF EXISTS clause, if the user requested it.
|
|
|
|
*/
|
|
|
|
if (*te->dropStmt != '\0')
|
|
|
|
{
|
|
|
|
if (!ropt->if_exists)
|
|
|
|
{
|
|
|
|
/* No --if-exists? Then just use the original */
|
|
|
|
ahprintf(AH, "%s", te->dropStmt);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2014-09-30 17:06:37 +02:00
|
|
|
* Inject an appropriate spelling of "if exists". For
|
|
|
|
* large objects, we have a separate routine that
|
|
|
|
* knows how to do it, without depending on
|
|
|
|
* te->dropStmt; use that. For other objects we need
|
|
|
|
* to parse the command.
|
2014-03-03 19:02:18 +01:00
|
|
|
*/
|
2014-09-30 17:06:37 +02:00
|
|
|
if (strncmp(te->desc, "BLOB", 4) == 0)
|
2014-03-03 19:02:18 +01:00
|
|
|
{
|
2014-09-30 17:06:37 +02:00
|
|
|
DropBlobIfExists(AH, te->catalogId.oid);
|
2014-03-03 19:02:18 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-09-30 17:06:37 +02:00
|
|
|
char *dropStmt = pg_strdup(te->dropStmt);
|
2016-11-17 20:59:13 +01:00
|
|
|
char *dropStmtOrig = dropStmt;
|
2014-09-30 17:06:37 +02:00
|
|
|
PQExpBuffer ftStmt = createPQExpBuffer();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Need to inject IF EXISTS clause after ALTER
|
|
|
|
* TABLE part in ALTER TABLE .. DROP statement
|
|
|
|
*/
|
|
|
|
if (strncmp(dropStmt, "ALTER TABLE", 11) == 0)
|
|
|
|
{
|
2019-07-04 03:01:13 +02:00
|
|
|
appendPQExpBufferStr(ftStmt,
|
|
|
|
"ALTER TABLE IF EXISTS");
|
2014-09-30 17:06:37 +02:00
|
|
|
dropStmt = dropStmt + 11;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ALTER TABLE..ALTER COLUMN..DROP DEFAULT does
|
|
|
|
* not support the IF EXISTS clause, and therefore
|
2016-11-17 20:59:13 +01:00
|
|
|
* we simply emit the original command for DEFAULT
|
|
|
|
* objects (modulo the adjustment made above).
|
|
|
|
*
|
Move handling of database properties from pg_dumpall into pg_dump.
This patch rearranges the division of labor between pg_dump and pg_dumpall
so that pg_dump itself handles all properties attached to a single
database. Notably, a database's ACL (GRANT/REVOKE status) and local GUC
settings established by ALTER DATABASE SET and ALTER ROLE IN DATABASE SET
can be dumped and restored by pg_dump. This is a long-requested
improvement.
"pg_dumpall -g" will now produce only role- and tablespace-related output,
nothing about individual databases. The total output of a regular
pg_dumpall run remains the same.
pg_dump (or pg_restore) will restore database-level properties only when
creating the target database with --create. This applies not only to
ACLs and GUCs but to the other database properties it already handled,
that is database comments and security labels. This is more consistent
and useful, but does represent an incompatibility in the behavior seen
without --create.
(This change makes the proposed patch to have pg_dump use "COMMENT ON
DATABASE CURRENT_DATABASE" unnecessary, since there is no case where
the command is issued that we won't know the true name of the database.
We might still want that patch as a feature in its own right, but pg_dump
no longer needs it.)
pg_dumpall with --clean will now drop and recreate the "postgres" and
"template1" databases in the target cluster, allowing their locale and
encoding settings to be changed if necessary, and providing a cleaner
way to set nondefault tablespaces for them than we had before. This
means that such a script must now always be started in the "postgres"
database; the order of drops and reconnects will not work otherwise.
Without --clean, the script will not adjust any database-level properties
of those two databases (including their comments, ACLs, and security
labels, which it formerly would try to set).
Another minor incompatibility is that the CREATE DATABASE commands in a
pg_dumpall script will now always specify locale and encoding settings.
Formerly those would be omitted if they matched the cluster's default.
While that behavior had some usefulness in some migration scenarios,
it also posed a significant hazard of unwanted locale/encoding changes.
To migrate to another locale/encoding, it's now necessary to use pg_dump
without --create to restore into a database with the desired settings.
Commit 4bd371f6f's hack to emit "SET default_transaction_read_only = off"
is gone: we now dodge that problem by the expedient of not issuing ALTER
DATABASE SET commands until after reconnecting to the target database.
Therefore, such settings won't apply during the restore session.
In passing, improve some shaky grammar in the docs, and add a note pointing
out that pg_dumpall's output can't be expected to load without any errors.
(Someday we might want to fix that, but this is not that patch.)
Haribabu Kommi, reviewed at various times by Andreas Karlsson,
Vaishnavi Prabakaran, and Robert Haas; further hacking by me.
Discussion: https://postgr.es/m/CAJrrPGcUurV0eWTeXODwsOYFN=Ekq36t1s0YnFYUNzsmRfdAyA@mail.gmail.com
2018-01-22 20:09:09 +01:00
|
|
|
* Likewise, don't mess with DATABASE PROPERTIES.
|
|
|
|
*
|
2016-11-17 20:59:13 +01:00
|
|
|
* If we used CREATE OR REPLACE VIEW as a means of
|
|
|
|
* quasi-dropping an ON SELECT rule, that should
|
|
|
|
* be emitted unchanged as well.
|
|
|
|
*
|
|
|
|
* For other object types, we need to extract the
|
|
|
|
* first part of the DROP which includes the
|
|
|
|
* object type. Most of the time this matches
|
2014-09-30 17:06:37 +02:00
|
|
|
* te->desc, so search for that; however for the
|
|
|
|
* different kinds of CONSTRAINTs, we know to
|
|
|
|
* search for hardcoded "DROP CONSTRAINT" instead.
|
|
|
|
*/
|
2016-11-17 20:59:13 +01:00
|
|
|
if (strcmp(te->desc, "DEFAULT") == 0 ||
|
Move handling of database properties from pg_dumpall into pg_dump.
This patch rearranges the division of labor between pg_dump and pg_dumpall
so that pg_dump itself handles all properties attached to a single
database. Notably, a database's ACL (GRANT/REVOKE status) and local GUC
settings established by ALTER DATABASE SET and ALTER ROLE IN DATABASE SET
can be dumped and restored by pg_dump. This is a long-requested
improvement.
"pg_dumpall -g" will now produce only role- and tablespace-related output,
nothing about individual databases. The total output of a regular
pg_dumpall run remains the same.
pg_dump (or pg_restore) will restore database-level properties only when
creating the target database with --create. This applies not only to
ACLs and GUCs but to the other database properties it already handled,
that is database comments and security labels. This is more consistent
and useful, but does represent an incompatibility in the behavior seen
without --create.
(This change makes the proposed patch to have pg_dump use "COMMENT ON
DATABASE CURRENT_DATABASE" unnecessary, since there is no case where
the command is issued that we won't know the true name of the database.
We might still want that patch as a feature in its own right, but pg_dump
no longer needs it.)
pg_dumpall with --clean will now drop and recreate the "postgres" and
"template1" databases in the target cluster, allowing their locale and
encoding settings to be changed if necessary, and providing a cleaner
way to set nondefault tablespaces for them than we had before. This
means that such a script must now always be started in the "postgres"
database; the order of drops and reconnects will not work otherwise.
Without --clean, the script will not adjust any database-level properties
of those two databases (including their comments, ACLs, and security
labels, which it formerly would try to set).
Another minor incompatibility is that the CREATE DATABASE commands in a
pg_dumpall script will now always specify locale and encoding settings.
Formerly those would be omitted if they matched the cluster's default.
While that behavior had some usefulness in some migration scenarios,
it also posed a significant hazard of unwanted locale/encoding changes.
To migrate to another locale/encoding, it's now necessary to use pg_dump
without --create to restore into a database with the desired settings.
Commit 4bd371f6f's hack to emit "SET default_transaction_read_only = off"
is gone: we now dodge that problem by the expedient of not issuing ALTER
DATABASE SET commands until after reconnecting to the target database.
Therefore, such settings won't apply during the restore session.
In passing, improve some shaky grammar in the docs, and add a note pointing
out that pg_dumpall's output can't be expected to load without any errors.
(Someday we might want to fix that, but this is not that patch.)
Haribabu Kommi, reviewed at various times by Andreas Karlsson,
Vaishnavi Prabakaran, and Robert Haas; further hacking by me.
Discussion: https://postgr.es/m/CAJrrPGcUurV0eWTeXODwsOYFN=Ekq36t1s0YnFYUNzsmRfdAyA@mail.gmail.com
2018-01-22 20:09:09 +01:00
|
|
|
strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
|
2016-11-17 20:59:13 +01:00
|
|
|
strncmp(dropStmt, "CREATE OR REPLACE VIEW", 22) == 0)
|
2015-07-02 11:32:48 +02:00
|
|
|
appendPQExpBufferStr(ftStmt, dropStmt);
|
2014-03-03 19:02:18 +01:00
|
|
|
else
|
2014-09-30 17:06:37 +02:00
|
|
|
{
|
2016-11-17 20:59:13 +01:00
|
|
|
char buffer[40];
|
|
|
|
char *mark;
|
|
|
|
|
2014-09-30 17:06:37 +02:00
|
|
|
if (strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0)
|
|
|
|
strcpy(buffer, "DROP CONSTRAINT");
|
|
|
|
else
|
|
|
|
snprintf(buffer, sizeof(buffer), "DROP %s",
|
|
|
|
te->desc);
|
2014-03-03 19:02:18 +01:00
|
|
|
|
2014-09-30 17:06:37 +02:00
|
|
|
mark = strstr(dropStmt, buffer);
|
2014-03-03 19:02:18 +01:00
|
|
|
|
2016-11-17 20:59:13 +01:00
|
|
|
if (mark)
|
|
|
|
{
|
|
|
|
*mark = '\0';
|
|
|
|
appendPQExpBuffer(ftStmt, "%s%s IF EXISTS%s",
|
|
|
|
dropStmt, buffer,
|
|
|
|
mark + strlen(buffer));
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* complain and emit unmodified command */
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"",
|
2016-11-17 20:59:13 +01:00
|
|
|
dropStmtOrig);
|
|
|
|
appendPQExpBufferStr(ftStmt, dropStmt);
|
|
|
|
}
|
2014-09-30 17:06:37 +02:00
|
|
|
}
|
2014-03-03 19:02:18 +01:00
|
|
|
|
2014-09-30 17:06:37 +02:00
|
|
|
ahprintf(AH, "%s", ftStmt->data);
|
2014-03-03 19:02:18 +01:00
|
|
|
|
2014-09-30 17:06:37 +02:00
|
|
|
destroyPQExpBuffer(ftStmt);
|
2016-11-17 20:59:13 +01:00
|
|
|
pg_free(dropStmtOrig);
|
2014-09-30 17:06:37 +02:00
|
|
|
}
|
2014-03-03 19:02:18 +01:00
|
|
|
}
|
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
}
|
2007-11-24 21:26:49 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* _selectOutputSchema may have set currSchema to reflect the effect
|
|
|
|
* of a "SET search_path" command it emitted. However, by now we may
|
|
|
|
* have dropped that schema; or it might not have existed in the first
|
|
|
|
* place. In either case the effective value of search_path will not
|
|
|
|
* be what we think. Forcibly reset currSchema so that we will
|
|
|
|
* re-establish the search_path setting when needed (after creating
|
|
|
|
* the schema).
|
|
|
|
*
|
|
|
|
* If we treated users as pg_dump'able objects then we'd need to reset
|
|
|
|
* currUser here too.
|
|
|
|
*/
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currSchema = NULL;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2011-08-29 04:27:48 +02:00
|
|
|
if (parallel_mode)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* In parallel mode, turn control over to the parallel-restore logic.
|
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
ParallelState *pstate;
|
|
|
|
TocEntry pending_list;
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* The archive format module may need some setup for this */
|
|
|
|
if (AH->PrepParallelRestorePtr)
|
|
|
|
AH->PrepParallelRestorePtr(AH);
|
|
|
|
|
|
|
|
pending_list_header_init(&pending_list);
|
2013-03-24 16:27:20 +01:00
|
|
|
|
|
|
|
/* This runs PRE_DATA items and then disconnects from the database */
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
restore_toc_entries_prefork(AH, &pending_list);
|
2013-03-24 16:27:20 +01:00
|
|
|
Assert(AH->connection == NULL);
|
|
|
|
|
|
|
|
/* ParallelBackupStart() will actually fork the processes */
|
2016-01-13 23:48:33 +01:00
|
|
|
pstate = ParallelBackupStart(AH);
|
2013-03-24 16:27:20 +01:00
|
|
|
restore_toc_entries_parallel(AH, pstate, &pending_list);
|
|
|
|
ParallelBackupEnd(AH, pstate);
|
|
|
|
|
2020-06-14 23:22:47 +02:00
|
|
|
/* reconnect the leader and see if we missed something */
|
2013-03-24 16:27:20 +01:00
|
|
|
restore_toc_entries_postfork(AH, &pending_list);
|
|
|
|
Assert(AH->connection != NULL);
|
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
else
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* In serial mode, process everything in three phases: normal items,
|
Fix pg_dump/pg_restore to restore event triggers later.
Previously, event triggers were restored just after regular triggers
(and FK constraints, which are basically triggers). This is risky
since an event trigger, once installed, could interfere with subsequent
restore commands. Worse, because event triggers don't have any
particular dependencies on any post-data objects, a parallel restore
would consider them eligible to be restored the moment the post-data
phase starts, allowing them to also interfere with restoration of a
whole bunch of objects that would have been restored before them in
a serial restore. There's no way to completely remove the risk of a
misguided event trigger breaking the restore, since if nothing else
it could break other event triggers. But we can certainly push them
to later in the process to minimize the hazard.
To fix, tweak the RestorePass mechanism introduced by commit 3eb9a5e7c
so that event triggers are handled as part of the post-ACL processing
pass (renaming the "REFRESH" pass to "POST_ACL" to reflect its more
general use). This will cause them to restore after everything except
matview refreshes, which seems OK since matview refreshes really ought
to run in the post-restore state of the database. In a parallel
restore, event triggers and matview refreshes might be intermixed,
but that seems all right as well.
Also update the code and comments in pg_dump_sort.c so that its idea
of how things are sorted agrees with what actually happens due to
the RestorePass mechanism. This is mostly cosmetic: it'll affect the
order of objects in a dump's TOC, but not the actual restore order.
But not changing that would be quite confusing to somebody reading
the code.
Back-patch to all supported branches.
Fabrízio de Royes Mello, tweaked a bit by me
Discussion: https://postgr.es/m/CAFcNs+ow1hmFox8P--3GSdtwz-S3Binb6ZmoP6Vk+Xg=K6eZNA@mail.gmail.com
2020-03-09 19:58:11 +01:00
|
|
|
* then ACLs, then post-ACL items. We might be able to skip one or
|
|
|
|
* both extra phases in some cases, eg data-only restores.
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
*/
|
|
|
|
bool haveACL = false;
|
Fix pg_dump/pg_restore to restore event triggers later.
Previously, event triggers were restored just after regular triggers
(and FK constraints, which are basically triggers). This is risky
since an event trigger, once installed, could interfere with subsequent
restore commands. Worse, because event triggers don't have any
particular dependencies on any post-data objects, a parallel restore
would consider them eligible to be restored the moment the post-data
phase starts, allowing them to also interfere with restoration of a
whole bunch of objects that would have been restored before them in
a serial restore. There's no way to completely remove the risk of a
misguided event trigger breaking the restore, since if nothing else
it could break other event triggers. But we can certainly push them
to later in the process to minimize the hazard.
To fix, tweak the RestorePass mechanism introduced by commit 3eb9a5e7c
so that event triggers are handled as part of the post-ACL processing
pass (renaming the "REFRESH" pass to "POST_ACL" to reflect its more
general use). This will cause them to restore after everything except
matview refreshes, which seems OK since matview refreshes really ought
to run in the post-restore state of the database. In a parallel
restore, event triggers and matview refreshes might be intermixed,
but that seems all right as well.
Also update the code and comments in pg_dump_sort.c so that its idea
of how things are sorted agrees with what actually happens due to
the RestorePass mechanism. This is mostly cosmetic: it'll affect the
order of objects in a dump's TOC, but not the actual restore order.
But not changing that would be quite confusing to somebody reading
the code.
Back-patch to all supported branches.
Fabrízio de Royes Mello, tweaked a bit by me
Discussion: https://postgr.es/m/CAFcNs+ow1hmFox8P--3GSdtwz-S3Binb6ZmoP6Vk+Xg=K6eZNA@mail.gmail.com
2020-03-09 19:58:11 +01:00
|
|
|
bool havePostACL = false;
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
{
|
|
|
|
if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
|
|
|
|
continue; /* ignore if not to be dumped at all */
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
switch (_tocEntryRestorePass(te))
|
|
|
|
{
|
|
|
|
case RESTORE_PASS_MAIN:
|
|
|
|
(void) restore_toc_entry(AH, te, false);
|
|
|
|
break;
|
|
|
|
case RESTORE_PASS_ACL:
|
|
|
|
haveACL = true;
|
|
|
|
break;
|
Fix pg_dump/pg_restore to restore event triggers later.
Previously, event triggers were restored just after regular triggers
(and FK constraints, which are basically triggers). This is risky
since an event trigger, once installed, could interfere with subsequent
restore commands. Worse, because event triggers don't have any
particular dependencies on any post-data objects, a parallel restore
would consider them eligible to be restored the moment the post-data
phase starts, allowing them to also interfere with restoration of a
whole bunch of objects that would have been restored before them in
a serial restore. There's no way to completely remove the risk of a
misguided event trigger breaking the restore, since if nothing else
it could break other event triggers. But we can certainly push them
to later in the process to minimize the hazard.
To fix, tweak the RestorePass mechanism introduced by commit 3eb9a5e7c
so that event triggers are handled as part of the post-ACL processing
pass (renaming the "REFRESH" pass to "POST_ACL" to reflect its more
general use). This will cause them to restore after everything except
matview refreshes, which seems OK since matview refreshes really ought
to run in the post-restore state of the database. In a parallel
restore, event triggers and matview refreshes might be intermixed,
but that seems all right as well.
Also update the code and comments in pg_dump_sort.c so that its idea
of how things are sorted agrees with what actually happens due to
the RestorePass mechanism. This is mostly cosmetic: it'll affect the
order of objects in a dump's TOC, but not the actual restore order.
But not changing that would be quite confusing to somebody reading
the code.
Back-patch to all supported branches.
Fabrízio de Royes Mello, tweaked a bit by me
Discussion: https://postgr.es/m/CAFcNs+ow1hmFox8P--3GSdtwz-S3Binb6ZmoP6Vk+Xg=K6eZNA@mail.gmail.com
2020-03-09 19:58:11 +01:00
|
|
|
case RESTORE_PASS_POST_ACL:
|
|
|
|
havePostACL = true;
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, result in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped?
(e.g. several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
if (haveACL)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0 &&
|
|
|
|
_tocEntryRestorePass(te) == RESTORE_PASS_ACL)
|
|
|
|
(void) restore_toc_entry(AH, te, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Fix pg_dump/pg_restore to restore event triggers later.
Previously, event triggers were restored just after regular triggers
(and FK constraints, which are basically triggers). This is risky
since an event trigger, once installed, could interfere with subsequent
restore commands. Worse, because event triggers don't have any
particular dependencies on any post-data objects, a parallel restore
would consider them eligible to be restored the moment the post-data
phase starts, allowing them to also interfere with restoration of a
whole bunch of objects that would have been restored before them in
a serial restore. There's no way to completely remove the risk of a
misguided event trigger breaking the restore, since if nothing else
it could break other event triggers. But we can certainly push them
to later in the process to minimize the hazard.
To fix, tweak the RestorePass mechanism introduced by commit 3eb9a5e7c
so that event triggers are handled as part of the post-ACL processing
pass (renaming the "REFRESH" pass to "POST_ACL" to reflect its more
general use). This will cause them to restore after everything except
matview refreshes, which seems OK since matview refreshes really ought
to run in the post-restore state of the database. In a parallel
restore, event triggers and matview refreshes might be intermixed,
but that seems all right as well.
Also update the code and comments in pg_dump_sort.c so that its idea
of how things are sorted agrees with what actually happens due to
the RestorePass mechanism. This is mostly cosmetic: it'll affect the
order of objects in a dump's TOC, but not the actual restore order.
But not changing that would be quite confusing to somebody reading
the code.
Back-patch to all supported branches.
Fabrízio de Royes Mello, tweaked a bit by me
Discussion: https://postgr.es/m/CAFcNs+ow1hmFox8P--3GSdtwz-S3Binb6ZmoP6Vk+Xg=K6eZNA@mail.gmail.com
2020-03-09 19:58:11 +01:00
|
|
|
if (havePostACL)
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
{
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0 &&
|
Fix pg_dump/pg_restore to restore event triggers later.
Previously, event triggers were restored just after regular triggers
(and FK constraints, which are basically triggers). This is risky
since an event trigger, once installed, could interfere with subsequent
restore commands. Worse, because event triggers don't have any
particular dependencies on any post-data objects, a parallel restore
would consider them eligible to be restored the moment the post-data
phase starts, allowing them to also interfere with restoration of a
whole bunch of objects that would have been restored before them in
a serial restore. There's no way to completely remove the risk of a
misguided event trigger breaking the restore, since if nothing else
it could break other event triggers. But we can certainly push them
to later in the process to minimize the hazard.
To fix, tweak the RestorePass mechanism introduced by commit 3eb9a5e7c
so that event triggers are handled as part of the post-ACL processing
pass (renaming the "REFRESH" pass to "POST_ACL" to reflect its more
general use). This will cause them to restore after everything except
matview refreshes, which seems OK since matview refreshes really ought
to run in the post-restore state of the database. In a parallel
restore, event triggers and matview refreshes might be intermixed,
but that seems all right as well.
Also update the code and comments in pg_dump_sort.c so that its idea
of how things are sorted agrees with what actually happens due to
the RestorePass mechanism. This is mostly cosmetic: it'll affect the
order of objects in a dump's TOC, but not the actual restore order.
But not changing that would be quite confusing to somebody reading
the code.
Back-patch to all supported branches.
Fabrízio de Royes Mello, tweaked a bit by me
Discussion: https://postgr.es/m/CAFcNs+ow1hmFox8P--3GSdtwz-S3Binb6ZmoP6Vk+Xg=K6eZNA@mail.gmail.com
2020-03-09 19:58:11 +01:00
|
|
|
_tocEntryRestorePass(te) == RESTORE_PASS_POST_ACL)
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
(void) restore_toc_entry(AH, te, false);
|
|
|
|
}
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-02-13 22:30:19 +01:00
|
|
|
if (ropt->single_txn)
|
2006-02-15 00:30:43 +01:00
|
|
|
{
|
|
|
|
if (AH->connection)
|
2014-10-14 20:00:55 +02:00
|
|
|
CommitTransaction(AHX);
|
2006-02-15 00:30:43 +01:00
|
|
|
else
|
|
|
|
ahprintf(AH, "COMMIT;\n\n");
|
|
|
|
}
|
2006-02-12 05:04:32 +01:00
|
|
|
|
2005-04-15 18:40:36 +02:00
|
|
|
if (AH->public.verbose)
|
|
|
|
dumpTimestamp(AH, "Completed on", time(NULL));
|
|
|
|
|
2005-03-18 18:32:55 +01:00
|
|
|
ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
2004-03-03 22:28:55 +01:00
|
|
|
* Clean up & we're done.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
AH->stage = STAGE_FINALIZING;
|
|
|
|
|
2004-03-03 22:28:55 +01:00
|
|
|
if (ropt->filename || ropt->compression)
|
2011-01-22 23:56:42 +01:00
|
|
|
RestoreOutput(AH, sav);
|
2004-03-03 22:28:55 +01:00
|
|
|
|
|
|
|
if (ropt->useDB)
|
2012-02-16 17:49:20 +01:00
|
|
|
DisconnectDatabase(&AH->public);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* Restore a single TOC item. Used in both parallel and non-parallel restore;
|
|
|
|
* is_parallel is true if we are in a worker child process.
|
|
|
|
*
|
|
|
|
* Returns 0 normally, but WORKER_CREATE_DONE or WORKER_INHIBIT_DATA if
|
|
|
|
* the parallel parent has to make the corresponding status update.
|
|
|
|
*/
|
|
|
|
static int
|
2016-01-13 23:48:33 +01:00
|
|
|
restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
2013-03-24 16:27:20 +01:00
|
|
|
int status = WORKER_OK;
|
2020-12-11 19:15:30 +01:00
|
|
|
int reqs;
|
2009-02-02 21:07:37 +01:00
|
|
|
bool defnDumped;
|
|
|
|
|
|
|
|
AH->currentTE = te;
|
|
|
|
|
|
|
|
/* Dump any relevant dump warnings to stderr */
|
|
|
|
if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0)
|
|
|
|
{
|
|
|
|
if (!ropt->dataOnly && te->defn != NULL && strlen(te->defn) != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("warning from original dump file: %s", te->defn);
|
2009-02-02 21:07:37 +01:00
|
|
|
else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("warning from original dump file: %s", te->copyStmt);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* Work out what, if anything, we want from this entry */
|
|
|
|
reqs = te->reqs;
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
defnDumped = false;
|
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* If it has a schema component that we want, then process that
|
|
|
|
*/
|
|
|
|
if ((reqs & REQ_SCHEMA) != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/* Show namespace in log message if available */
|
2014-08-26 10:50:48 +02:00
|
|
|
if (te->namespace)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("creating %s \"%s.%s\"",
|
|
|
|
te->desc, te->namespace, te->tag);
|
2014-08-26 10:50:48 +02:00
|
|
|
else
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("creating %s \"%s\"",
|
|
|
|
te->desc, te->tag);
|
2014-08-26 10:50:48 +02:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
_printTocEntry(AH, te, false);
|
2009-02-02 21:07:37 +01:00
|
|
|
defnDumped = true;
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "TABLE") == 0)
|
|
|
|
{
|
|
|
|
if (AH->lastErrorTE == te)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We failed to create the table. If
|
|
|
|
* --no-data-for-failed-tables was given, mark the
|
|
|
|
* corresponding TABLE DATA to be ignored.
|
|
|
|
*
|
|
|
|
* In the parallel case this must be done in the parent, so we
|
|
|
|
* just set the return value.
|
|
|
|
*/
|
|
|
|
if (ropt->noDataForFailedTables)
|
|
|
|
{
|
|
|
|
if (is_parallel)
|
2013-03-24 16:27:20 +01:00
|
|
|
status = WORKER_INHIBIT_DATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
else
|
|
|
|
inhibit_data_for_failed_table(AH, te);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We created the table successfully. Mark the corresponding
|
|
|
|
* TABLE DATA for possible truncation.
|
|
|
|
*
|
|
|
|
* In the parallel case this must be done in the parent, so we
|
|
|
|
* just set the return value.
|
|
|
|
*/
|
|
|
|
if (is_parallel)
|
2013-03-24 16:27:20 +01:00
|
|
|
status = WORKER_CREATE_DONE;
|
2009-02-02 21:07:37 +01:00
|
|
|
else
|
|
|
|
mark_create_done(AH, te);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
In pg_dump, force reconnection after issuing ALTER DATABASE SET command(s).
The folly of not doing this was exposed by the buildfarm: in some cases,
the GUC settings applied through ALTER DATABASE SET may be essential to
interpreting the reloaded data correctly. Another argument why we can't
really get away with the scheme proposed in commit b3f840120 is that it
cannot work for parallel restore: even if the parent process manages to
hang onto the previous GUC state, worker processes would see the state
post-ALTER-DATABASE. (Perhaps we could have dodged that bullet by
delaying DATABASE PROPERTIES restoration to the end of the run, but
that does nothing for the data semantics problem.)
This leaves us with no solution for the default_transaction_read_only issue
that commit 4bd371f6f intended to work around, other than "you gotta remove
such settings before dumping/upgrading". However, in view of the fact that
parallel restore broke that hack years ago and no one has noticed, it's
fair to question how many people care. I'm unexcited about adding a large
dollop of new complexity to handle that corner case.
This would be a one-liner fix, except it turns out that ReconnectToServer
tries to optimize away "redundant" reconnections. While that may have been
valuable when coded, a quick survey of current callers shows that there are
no cases where that's actually useful, so just remove that check. While at
it, remove the function's useless return value.
Discussion: https://postgr.es/m/12453.1516655001@sss.pgh.pa.us
2018-01-23 16:55:08 +01:00
|
|
|
/*
|
|
|
|
* If we created a DB, connect to it. Also, if we changed DB
|
|
|
|
* properties, reconnect to ensure that relevant GUC settings are
|
|
|
|
* applied to our session.
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "DATABASE") == 0 ||
|
|
|
|
strcmp(te->desc, "DATABASE PROPERTIES") == 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("connecting to new database \"%s\"", te->tag);
|
2009-02-02 21:07:37 +01:00
|
|
|
_reconnectToDB(AH, te->tag);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
* If it has a data component that we want, then process that
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if ((reqs & REQ_DATA) != 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* hadDumper will be set if there is genuine data component for this
|
|
|
|
* node. Otherwise, we need to check the defn field for statements
|
|
|
|
* that need to be executed in data-only restores.
|
|
|
|
*/
|
|
|
|
if (te->hadDumper)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If we can output the data, then restore it.
|
|
|
|
*/
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (AH->PrintTocDataPtr != NULL)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
_printTocEntry(AH, te, true);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("processing %s", te->desc);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
_selectOutputSchema(AH, "pg_catalog");
|
|
|
|
|
2014-06-13 02:14:32 +02:00
|
|
|
/* Send BLOB COMMENTS data to ExecuteSimpleCommands() */
|
|
|
|
if (strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
|
|
AH->outputKind = OUTPUT_OTHERDATA;
|
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->PrintTocDataPtr(AH, te);
|
2014-06-13 02:14:32 +02:00
|
|
|
|
|
|
|
AH->outputKind = OUTPUT_SQLCMDS;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
_disableTriggersIfNecessary(AH, te);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* Select owner and schema as necessary */
|
|
|
|
_becomeOwner(AH, te);
|
|
|
|
_selectOutputSchema(AH, te->namespace);
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("processing data for table \"%s.%s\"",
|
2014-08-26 10:50:48 +02:00
|
|
|
te->namespace, te->tag);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* In parallel restore, if we created the table earlier in
|
|
|
|
* the run then we wrap the COPY in a transaction and
|
|
|
|
* precede it with a TRUNCATE. If archiving is not on
|
|
|
|
* this prevents WAL-logging the COPY. This obtains a
|
|
|
|
* speedup similar to that from using single_txn mode in
|
|
|
|
* non-parallel restores.
|
|
|
|
*/
|
|
|
|
if (is_parallel && te->created)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Parallel restore is always talking directly to a
|
|
|
|
* server, so no need to see if we should issue BEGIN.
|
|
|
|
*/
|
2014-10-14 20:00:55 +02:00
|
|
|
StartTransaction(&AH->public);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the server version is >= 8.4, make sure we issue
|
|
|
|
* TRUNCATE with ONLY so that child tables are not
|
|
|
|
* wiped.
|
|
|
|
*/
|
|
|
|
ahprintf(AH, "TRUNCATE TABLE %s%s;\n\n",
|
|
|
|
(PQserverVersion(AH->connection) >= 80400 ?
|
|
|
|
"ONLY " : ""),
|
Ensure schema qualification in pg_restore DISABLE/ENABLE TRIGGER commands.
Previously, this code blindly followed the common coding pattern of
passing PQserverVersion(AH->connection) as the server-version parameter
of fmtQualifiedId. That works as long as we have a connection; but in
pg_restore with text output, we don't. Instead we got a zero from
PQserverVersion, which fmtQualifiedId interpreted as "server is too old to
have schemas", and so the name went unqualified. That still accidentally
managed to work in many cases, which is probably why this ancient bug went
undetected for so long. It only became obvious in the wake of the changes
to force dump/restore to execute with restricted search_path.
In HEAD/v11, let's deal with this by ripping out fmtQualifiedId's server-
version behavioral dependency, and just making it schema-qualify all the
time. We no longer support pg_dump from servers old enough to need the
ability to omit schema name, let alone restoring to them. (Also, the few
callers outside pg_dump already didn't work with pre-schema servers.)
In older branches, that's not an acceptable solution, so instead just
tweak the DISABLE/ENABLE TRIGGER logic to ensure it will schema-qualify
its output regardless of server version.
Per bug #15338 from Oleg somebody. Back-patch to all supported branches.
Discussion: https://postgr.es/m/153452458706.1316.5328079417086507743@wrigleys.postgresql.org
2018-08-17 23:12:21 +02:00
|
|
|
fmtQualifiedId(te->namespace, te->tag));
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-07-28 20:06:57 +02:00
|
|
|
* If we have a copy statement, use it.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (te->copyStmt && strlen(te->copyStmt) > 0)
|
|
|
|
{
|
|
|
|
ahprintf(AH, "%s", te->copyStmt);
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
AH->outputKind = OUTPUT_COPYDATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
else
|
|
|
|
AH->outputKind = OUTPUT_OTHERDATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->PrintTocDataPtr(AH, te);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
/*
|
|
|
|
* Terminate COPY if needed.
|
|
|
|
*/
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
if (AH->outputKind == OUTPUT_COPYDATA &&
|
|
|
|
RestoringToDB(AH))
|
2014-10-14 20:00:55 +02:00
|
|
|
EndDBCopyMode(&AH->public, te->tag);
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
AH->outputKind = OUTPUT_SQLCMDS;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* close out the transaction started above */
|
|
|
|
if (is_parallel && te->created)
|
2014-10-14 20:00:55 +02:00
|
|
|
CommitTransaction(&AH->public);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2016-01-13 23:48:33 +01:00
|
|
|
_enableTriggersIfNecessary(AH, te);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (!defnDumped)
|
|
|
|
{
|
|
|
|
/* If we haven't already dumped the defn part, do so now */
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("executing %s %s", te->desc, te->tag);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
_printTocEntry(AH, te, false);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
if (AH->public.n_errors > 0 && status == WORKER_OK)
|
|
|
|
status = WORKER_IGNORED_ERRORS;
|
|
|
|
|
|
|
|
return status;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Allocate a new RestoreOptions block.
|
|
|
|
* This is mainly so we can initialize it, but also for future expansion,
|
|
|
|
*/
|
2000-07-04 16:25:28 +02:00
|
|
|
RestoreOptions *
|
|
|
|
NewRestoreOptions(void)
|
|
|
|
{
|
|
|
|
RestoreOptions *opts;
|
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
opts = (RestoreOptions *) pg_malloc0(sizeof(RestoreOptions));
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2009-01-05 17:54:37 +01:00
|
|
|
/* set any fields that shouldn't default to zeroes */
|
2000-07-04 16:25:28 +02:00
|
|
|
opts->format = archUnknown;
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
opts->cparams.promptPassword = TRI_DEFAULT;
|
2011-12-17 01:09:38 +01:00
|
|
|
opts->dumpSections = DUMP_UNSECTIONED;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
return opts;
|
|
|
|
}
|
|
|
|
|
2001-03-06 05:08:04 +01:00
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
_disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
/* This hack is only needed in a data-only restore */
|
|
|
|
if (!ropt->dataOnly || !ropt->disable_triggers)
|
2000-08-01 17:51:45 +02:00
|
|
|
return;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("disabling triggers for %s", te->tag);
|
2005-08-24 00:40:47 +02:00
|
|
|
|
2000-08-01 17:51:45 +02:00
|
|
|
/*
|
2002-05-11 00:36:27 +02:00
|
|
|
* Become superuser if possible, since they are the only ones who can
|
2005-08-24 00:40:47 +02:00
|
|
|
* disable constraint triggers. If -S was not given, assume the initial
|
|
|
|
* user identity is a superuser. (XXX would it be better to become the
|
|
|
|
* table owner?)
|
2000-08-01 17:51:45 +02:00
|
|
|
*/
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeUser(AH, ropt->superuser);
|
2000-08-01 17:51:45 +02:00
|
|
|
|
|
|
|
/*
|
2005-08-24 00:40:47 +02:00
|
|
|
* Disable them.
|
2000-08-01 17:51:45 +02:00
|
|
|
*/
|
2005-08-24 00:40:47 +02:00
|
|
|
ahprintf(AH, "ALTER TABLE %s DISABLE TRIGGER ALL;\n\n",
|
Ensure schema qualification in pg_restore DISABLE/ENABLE TRIGGER commands.
Previously, this code blindly followed the common coding pattern of
passing PQserverVersion(AH->connection) as the server-version parameter
of fmtQualifiedId. That works as long as we have a connection; but in
pg_restore with text output, we don't. Instead we got a zero from
PQserverVersion, which fmtQualifiedId interpreted as "server is too old to
have schemas", and so the name went unqualified. That still accidentally
managed to work in many cases, which is probably why this ancient bug went
undetected for so long. It only became obvious in the wake of the changes
to force dump/restore to execute with restricted search_path.
In HEAD/v11, let's deal with this by ripping out fmtQualifiedId's server-
version behavioral dependency, and just making it schema-qualify all the
time. We no longer support pg_dump from servers old enough to need the
ability to omit schema name, let alone restoring to them. (Also, the few
callers outside pg_dump already didn't work with pre-schema servers.)
In older branches, that's not an acceptable solution, so instead just
tweak the DISABLE/ENABLE TRIGGER logic to ensure it will schema-qualify
its output regardless of server version.
Per bug #15338 from Oleg somebody. Back-patch to all supported branches.
Discussion: https://postgr.es/m/153452458706.1316.5328079417086507743@wrigleys.postgresql.org
2018-08-17 23:12:21 +02:00
|
|
|
fmtQualifiedId(te->namespace, te->tag));
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-03-06 05:08:04 +01:00
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
_enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
/* This hack is only needed in a data-only restore */
|
|
|
|
if (!ropt->dataOnly || !ropt->disable_triggers)
|
2000-08-01 17:51:45 +02:00
|
|
|
return;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("enabling triggers for %s", te->tag);
|
2005-08-24 00:40:47 +02:00
|
|
|
|
2000-08-01 17:51:45 +02:00
|
|
|
/*
|
2002-05-11 00:36:27 +02:00
|
|
|
* Become superuser if possible, since they are the only ones who can
|
2005-08-24 00:40:47 +02:00
|
|
|
* disable constraint triggers. If -S was not given, assume the initial
|
|
|
|
* user identity is a superuser. (XXX would it be better to become the
|
|
|
|
* table owner?)
|
2000-08-01 17:51:45 +02:00
|
|
|
*/
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeUser(AH, ropt->superuser);
|
2000-08-01 17:51:45 +02:00
|
|
|
|
|
|
|
/*
|
2005-08-24 00:40:47 +02:00
|
|
|
* Enable them.
|
2000-08-01 17:51:45 +02:00
|
|
|
*/
|
2005-08-24 00:40:47 +02:00
|
|
|
ahprintf(AH, "ALTER TABLE %s ENABLE TRIGGER ALL;\n\n",
|
Ensure schema qualification in pg_restore DISABLE/ENABLE TRIGGER commands.
Previously, this code blindly followed the common coding pattern of
passing PQserverVersion(AH->connection) as the server-version parameter
of fmtQualifiedId. That works as long as we have a connection; but in
pg_restore with text output, we don't. Instead we got a zero from
PQserverVersion, which fmtQualifiedId interpreted as "server is too old to
have schemas", and so the name went unqualified. That still accidentally
managed to work in many cases, which is probably why this ancient bug went
undetected for so long. It only became obvious in the wake of the changes
to force dump/restore to execute with restricted search_path.
In HEAD/v11, let's deal with this by ripping out fmtQualifiedId's server-
version behavioral dependency, and just making it schema-qualify all the
time. We no longer support pg_dump from servers old enough to need the
ability to omit schema name, let alone restoring to them. (Also, the few
callers outside pg_dump already didn't work with pre-schema servers.)
In older branches, that's not an acceptable solution, so instead just
tweak the DISABLE/ENABLE TRIGGER logic to ensure it will schema-qualify
its output regardless of server version.
Per bug #15338 from Oleg somebody. Back-patch to all supported branches.
Discussion: https://postgr.es/m/153452458706.1316.5328079417086507743@wrigleys.postgresql.org
2018-08-17 23:12:21 +02:00
|
|
|
fmtQualifiedId(te->namespace, te->tag));
|
2000-08-01 17:51:45 +02:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
/*
|
2000-07-21 13:40:08 +02:00
|
|
|
* This is a routine that is part of the dumper interface, hence the 'Archive*' parameter.
|
2000-07-04 16:25:28 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Public */
|
2014-05-06 02:27:16 +02:00
|
|
|
void
|
2002-08-20 19:54:45 +02:00
|
|
|
WriteData(Archive *AHX, const void *data, size_t dLen)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (!AH->currToc)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteDataPtr(AH, data, dLen);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a new TOC entry. The TOC was designed as a TOC, but is now the
|
|
|
|
* repository for all metadata. But the name has stuck.
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
*
|
|
|
|
* The new entry is added to the Archive's TOC list. Most callers can ignore
|
|
|
|
* the result value because nothing else need be done, but a few want to
|
|
|
|
* manipulate the TOC entry further.
|
2000-07-04 16:25:28 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Public */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
TocEntry *
|
2019-02-01 15:29:42 +01:00
|
|
|
ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId,
|
|
|
|
ArchiveOpts *opts)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
TocEntry *newToc;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
AH->tocCount++;
|
|
|
|
if (dumpId > AH->maxDumpId)
|
|
|
|
AH->maxDumpId = dumpId;
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
newToc->prev = AH->toc->prev;
|
|
|
|
newToc->next = AH->toc;
|
|
|
|
AH->toc->prev->next = newToc;
|
|
|
|
AH->toc->prev = newToc;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
newToc->catalogId = catalogId;
|
|
|
|
newToc->dumpId = dumpId;
|
2019-02-01 15:29:42 +01:00
|
|
|
newToc->section = opts->section;
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2019-02-01 15:29:42 +01:00
|
|
|
newToc->tag = pg_strdup(opts->tag);
|
|
|
|
newToc->namespace = opts->namespace ? pg_strdup(opts->namespace) : NULL;
|
|
|
|
newToc->tablespace = opts->tablespace ? pg_strdup(opts->tablespace) : NULL;
|
2019-03-06 18:54:38 +01:00
|
|
|
newToc->tableam = opts->tableam ? pg_strdup(opts->tableam) : NULL;
|
2019-04-26 18:03:59 +02:00
|
|
|
newToc->owner = opts->owner ? pg_strdup(opts->owner) : NULL;
|
2019-02-01 15:29:42 +01:00
|
|
|
newToc->desc = pg_strdup(opts->description);
|
2019-04-26 18:03:59 +02:00
|
|
|
newToc->defn = opts->createStmt ? pg_strdup(opts->createStmt) : NULL;
|
|
|
|
newToc->dropStmt = opts->dropStmt ? pg_strdup(opts->dropStmt) : NULL;
|
2019-02-01 15:29:42 +01:00
|
|
|
newToc->copyStmt = opts->copyStmt ? pg_strdup(opts->copyStmt) : NULL;
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2019-02-01 15:29:42 +01:00
|
|
|
if (opts->nDeps > 0)
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
2019-02-01 15:29:42 +01:00
|
|
|
newToc->dependencies = (DumpId *) pg_malloc(opts->nDeps * sizeof(DumpId));
|
|
|
|
memcpy(newToc->dependencies, opts->deps, opts->nDeps * sizeof(DumpId));
|
|
|
|
newToc->nDeps = opts->nDeps;
|
2003-12-06 04:00:16 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
newToc->dependencies = NULL;
|
|
|
|
newToc->nDeps = 0;
|
|
|
|
}
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2019-02-01 15:29:42 +01:00
|
|
|
newToc->dataDumper = opts->dumpFn;
|
|
|
|
newToc->dataDumperArg = opts->dumpArg;
|
|
|
|
newToc->hadDumper = opts->dumpFn ? true : false;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
newToc->formatData = NULL;
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
newToc->dataLength = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
if (AH->ArchiveEntryPtr != NULL)
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ArchiveEntryPtr(AH, newToc);
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
|
|
|
|
return newToc;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Public */
|
|
|
|
void
|
2016-01-13 23:48:33 +01:00
|
|
|
PrintTOCSummary(Archive *AHX)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
2009-02-02 21:07:37 +01:00
|
|
|
TocEntry *te;
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
teSection curSection;
|
2000-07-04 16:25:28 +02:00
|
|
|
OutputContext sav;
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
const char *fmtName;
|
2014-09-06 01:22:31 +02:00
|
|
|
char stamp_str[64];
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
sav = SaveOutput(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
if (ropt->filename)
|
2011-01-22 23:56:42 +01:00
|
|
|
SetOutput(AH, ropt->filename, 0 /* no compression */ );
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2014-10-27 01:59:21 +01:00
|
|
|
if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
|
|
|
|
localtime(&AH->createDate)) == 0)
|
|
|
|
strcpy(stamp_str, "[unknown]");
|
|
|
|
|
2014-09-06 01:22:31 +02:00
|
|
|
ahprintf(AH, ";\n; Archive created at %s\n", stamp_str);
|
2000-07-21 13:40:08 +02:00
|
|
|
ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n",
|
2019-02-01 15:29:42 +01:00
|
|
|
sanitize_line(AH->archdbname, false),
|
2017-03-10 20:15:09 +01:00
|
|
|
AH->tocCount, AH->compression);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
switch (AH->format)
|
|
|
|
{
|
|
|
|
case archCustom:
|
|
|
|
fmtName = "CUSTOM";
|
|
|
|
break;
|
2013-06-15 22:07:02 +02:00
|
|
|
case archDirectory:
|
|
|
|
fmtName = "DIRECTORY";
|
|
|
|
break;
|
2000-07-21 13:40:08 +02:00
|
|
|
case archTar:
|
|
|
|
fmtName = "TAR";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
fmtName = "UNKNOWN";
|
|
|
|
}
|
2000-08-01 17:51:45 +02:00
|
|
|
|
2016-10-25 18:00:00 +02:00
|
|
|
ahprintf(AH, "; Dump Version: %d.%d-%d\n",
|
|
|
|
ARCHIVE_MAJOR(AH->version), ARCHIVE_MINOR(AH->version), ARCHIVE_REV(AH->version));
|
2002-10-22 21:15:23 +02:00
|
|
|
ahprintf(AH, "; Format: %s\n", fmtName);
|
2002-10-27 03:52:10 +01:00
|
|
|
ahprintf(AH, "; Integer: %d bytes\n", (int) AH->intSize);
|
|
|
|
ahprintf(AH, "; Offset: %d bytes\n", (int) AH->offSize);
|
2004-11-06 20:36:02 +01:00
|
|
|
if (AH->archiveRemoteVersion)
|
|
|
|
ahprintf(AH, "; Dumped from database version: %s\n",
|
|
|
|
AH->archiveRemoteVersion);
|
|
|
|
if (AH->archiveDumpVersion)
|
|
|
|
ahprintf(AH, "; Dumped by pg_dump version: %s\n",
|
|
|
|
AH->archiveDumpVersion);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
curSection = SECTION_PRE_DATA;
|
2009-02-02 21:07:37 +01:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (te->section != SECTION_NONE)
|
|
|
|
curSection = te->section;
|
|
|
|
if (ropt->verbose ||
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
(_tocEntryRequired(te, curSection, AH) & (REQ_SCHEMA | REQ_DATA)) != 0)
|
2017-03-10 20:15:09 +01:00
|
|
|
{
|
|
|
|
char *sanitized_name;
|
|
|
|
char *sanitized_schema;
|
|
|
|
char *sanitized_owner;
|
|
|
|
|
|
|
|
/*
|
|
|
|
*/
|
2019-02-01 15:29:42 +01:00
|
|
|
sanitized_name = sanitize_line(te->tag, false);
|
|
|
|
sanitized_schema = sanitize_line(te->namespace, true);
|
|
|
|
sanitized_owner = sanitize_line(te->owner, false);
|
2017-03-10 20:15:09 +01:00
|
|
|
|
2004-10-08 17:03:26 +02:00
|
|
|
ahprintf(AH, "%d; %u %u %s %s %s %s\n", te->dumpId,
|
2003-12-06 04:00:16 +01:00
|
|
|
te->catalogId.tableoid, te->catalogId.oid,
|
2017-03-10 20:15:09 +01:00
|
|
|
te->desc, sanitized_schema, sanitized_name,
|
|
|
|
sanitized_owner);
|
|
|
|
|
|
|
|
free(sanitized_name);
|
|
|
|
free(sanitized_schema);
|
|
|
|
free(sanitized_owner);
|
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
if (ropt->verbose && te->nDeps > 0)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
ahprintf(AH, ";\tdepends on:");
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
ahprintf(AH, " %d", te->dependencies[i]);
|
|
|
|
ahprintf(AH, "\n");
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2015-09-14 15:19:49 +02:00
|
|
|
/* Enforce strict names checking */
|
|
|
|
if (ropt->strict_names)
|
|
|
|
StrictNamesCheck(ropt);
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
if (ropt->filename)
|
2011-01-22 23:56:42 +01:00
|
|
|
RestoreOutput(AH, sav);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/***********
|
|
|
|
* BLOB Archival
|
|
|
|
***********/
|
|
|
|
|
|
|
|
/* Called by a dumper to signal start of a BLOB */
|
|
|
|
int
|
2001-04-01 07:42:51 +02:00
|
|
|
StartBlob(Archive *AHX, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
|
|
|
if (!AH->StartBlobPtr)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("large-object output not supported in chosen format");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->StartBlobPtr(AH, AH->currToc, oid);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Called by a dumper to signal end of a BLOB */
|
|
|
|
int
|
2001-04-01 07:42:51 +02:00
|
|
|
EndBlob(Archive *AHX, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
|
|
|
if (AH->EndBlobPtr)
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->EndBlobPtr(AH, AH->currToc, oid);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**********
|
|
|
|
* BLOB Restoration
|
|
|
|
**********/
|
|
|
|
|
2000-10-31 15:20:30 +01:00
|
|
|
/*
|
|
|
|
* Called by a format handler before any blobs are restored
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
StartRestoreBlobs(ArchiveHandle *AH)
|
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
|
|
|
if (!ropt->single_txn)
|
2006-02-15 00:30:43 +01:00
|
|
|
{
|
|
|
|
if (AH->connection)
|
2014-10-14 20:00:55 +02:00
|
|
|
StartTransaction(&AH->public);
|
2006-02-15 00:30:43 +01:00
|
|
|
else
|
|
|
|
ahprintf(AH, "BEGIN;\n\n");
|
|
|
|
}
|
2005-06-21 22:45:44 +02:00
|
|
|
|
2000-10-31 15:20:30 +01:00
|
|
|
AH->blobCount = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by a format handler after all blobs are restored
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
EndRestoreBlobs(ArchiveHandle *AH)
|
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
|
|
|
if (!ropt->single_txn)
|
2006-02-15 00:30:43 +01:00
|
|
|
{
|
|
|
|
if (AH->connection)
|
2014-10-14 20:00:55 +02:00
|
|
|
CommitTransaction(&AH->public);
|
2006-02-15 00:30:43 +01:00
|
|
|
else
|
|
|
|
ahprintf(AH, "COMMIT;\n\n");
|
|
|
|
}
|
2004-03-03 22:28:55 +01:00
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info(ngettext("restored %d large object",
|
|
|
|
"restored %d large objects",
|
2009-03-26 23:26:08 +01:00
|
|
|
AH->blobCount),
|
|
|
|
AH->blobCount);
|
2000-10-31 15:20:30 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Called by a format handler to initiate restoration of a blob
|
|
|
|
*/
|
|
|
|
void
|
2009-07-21 23:46:10 +02:00
|
|
|
StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
bool old_blob_style = (AH->version < K_VERS_1_12);
|
2001-07-03 22:21:50 +02:00
|
|
|
Oid loOid;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2000-10-31 15:20:30 +01:00
|
|
|
AH->blobCount++;
|
|
|
|
|
2002-04-24 04:21:04 +02:00
|
|
|
/* Initialize the LO Buffer */
|
|
|
|
AH->lo_buf_used = 0;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("restoring large object with OID %u", oid);
|
2005-06-21 22:45:44 +02:00
|
|
|
|
2010-02-18 02:29:10 +01:00
|
|
|
/* With an old archive we must do drop and create logic here */
|
|
|
|
if (old_blob_style && drop)
|
2009-12-14 01:39:11 +01:00
|
|
|
DropBlobIfExists(AH, oid);
|
2009-07-21 23:46:10 +02:00
|
|
|
|
2005-06-21 22:45:44 +02:00
|
|
|
if (AH->connection)
|
2000-10-31 15:20:30 +01:00
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
if (old_blob_style)
|
|
|
|
{
|
|
|
|
loOid = lo_create(AH->connection, oid);
|
|
|
|
if (loOid == 0 || loOid != oid)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not create large object %u: %s",
|
2012-03-20 22:38:11 +01:00
|
|
|
oid, PQerrorMessage(AH->connection));
|
2010-02-18 02:29:10 +01:00
|
|
|
}
|
2005-06-21 22:45:44 +02:00
|
|
|
AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
|
|
|
|
if (AH->loFd == -1)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not open large object %u: %s",
|
2012-03-20 22:38:11 +01:00
|
|
|
oid, PQerrorMessage(AH->connection));
|
2005-06-21 22:45:44 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
if (old_blob_style)
|
|
|
|
ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
|
|
|
|
oid, INV_WRITE);
|
|
|
|
else
|
|
|
|
ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
|
|
|
|
oid, INV_WRITE);
|
2000-10-31 15:20:30 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
AH->writingBlob = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2001-04-01 07:42:51 +02:00
|
|
|
EndRestoreBlob(ArchiveHandle *AH, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2002-05-29 03:38:56 +02:00
|
|
|
if (AH->lo_buf_used > 0)
|
|
|
|
{
|
|
|
|
/* Write remaining bytes from the LO buffer */
|
2005-06-21 22:45:44 +02:00
|
|
|
dump_lo_buf(AH);
|
2002-05-29 03:38:56 +02:00
|
|
|
}
|
2002-04-24 04:21:04 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->writingBlob = 0;
|
|
|
|
|
2005-06-21 22:45:44 +02:00
|
|
|
if (AH->connection)
|
2000-10-31 15:20:30 +01:00
|
|
|
{
|
2005-06-21 22:45:44 +02:00
|
|
|
lo_close(AH->connection, AH->loFd);
|
|
|
|
AH->loFd = -1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2009-07-21 23:46:10 +02:00
|
|
|
ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n");
|
2000-10-31 15:20:30 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/***********
|
|
|
|
* Sorting and Reordering
|
|
|
|
***********/
|
|
|
|
|
|
|
|
/*
 * SortTocFromFile - reorder the archive's TOC according to a user-supplied
 * list file (pg_restore -L).
 *
 * Each line of the file is "<dump id> [; comment]".  Entries named in the
 * file are marked wanted and moved, in file order, to the end of the TOC
 * list; unselected entries collect at the front.  That ordering is required
 * for parallel restore: unrestored items' dependencies must be marked
 * satisfied before restorable items are examined (see comment below).
 */
void
SortTocFromFile(Archive *AHX)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	RestoreOptions *ropt = AH->public.ropt;
	FILE	   *fh;
	StringInfoData linebuf;

	/* Allocate space for the 'wanted' array, and init it (all false) */
	ropt->idWanted = (bool *) pg_malloc0(sizeof(bool) * AH->maxDumpId);

	/* Setup the file */
	fh = fopen(ropt->tocFile, PG_BINARY_R);
	if (!fh)
		fatal("could not open TOC file \"%s\": %m", ropt->tocFile);

	initStringInfo(&linebuf);

	/* pg_get_line_buf reads one line per call, of arbitrary length */
	while (pg_get_line_buf(fh, &linebuf))
	{
		char	   *cmnt;
		char	   *endptr;
		DumpId		id;
		TocEntry   *te;

		/* Truncate line at comment, if any */
		cmnt = strchr(linebuf.data, ';');
		if (cmnt != NULL)
		{
			cmnt[0] = '\0';
			/* keep linebuf.len consistent with the truncated string */
			linebuf.len = cmnt - linebuf.data;
		}

		/* Ignore if all blank */
		if (strspn(linebuf.data, " \t\r\n") == linebuf.len)
			continue;

		/*
		 * Get an ID, check it's valid and not already seen.  Duplicate or
		 * out-of-range IDs are warned about and skipped, not fatal.
		 */
		id = strtol(linebuf.data, &endptr, 10);
		if (endptr == linebuf.data || id <= 0 || id > AH->maxDumpId ||
			ropt->idWanted[id - 1])
		{
			pg_log_warning("line ignored: %s", linebuf.data);
			continue;
		}

		/* Find TOC entry */
		te = getTocEntryByDumpId(AH, id);
		if (!te)
			fatal("could not find entry for ID %d",
				  id);

		/* Mark it wanted (idWanted is indexed by id-1) */
		ropt->idWanted[id - 1] = true;

		/*
		 * Move each item to the end of the list as it is selected, so that
		 * they are placed in the desired order.  Any unwanted items will end
		 * up at the front of the list, which may seem unintuitive but it's
		 * what we need.  In an ordinary serial restore that makes no
		 * difference, but in a parallel restore we need to mark unrestored
		 * items' dependencies as satisfied before we start examining
		 * restorable items.  Otherwise they could have surprising
		 * side-effects on the order in which restorable items actually get
		 * restored.
		 */
		_moveBefore(AH->toc, te);
	}

	pg_free(linebuf.data);

	if (fclose(fh) != 0)
		fatal("could not close TOC file: %m");
}
|
|
|
|
|
|
|
|
/**********************
|
2020-06-12 14:05:10 +02:00
|
|
|
* Convenience functions that look like standard IO functions
|
2000-07-04 16:25:28 +02:00
|
|
|
* for writing data when in dump mode.
|
|
|
|
**********************/
|
|
|
|
|
|
|
|
/* Public */
|
2014-05-06 02:27:16 +02:00
|
|
|
void
|
2000-07-04 16:25:28 +02:00
|
|
|
archputs(const char *s, Archive *AH)
|
|
|
|
{
|
2014-05-06 02:27:16 +02:00
|
|
|
WriteData(AH, s, strlen(s));
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Public: printf-style formatted write to the archive (via WriteData).
 *
 * Returns the number of bytes written (as an int, matching printf
 * convention).  Uses a grow-and-retry loop because pvsnprintf reports the
 * space needed when the buffer is too small.
 */
int
archprintf(Archive *AH, const char *fmt,...)
{
	int			save_errno = errno;
	char	   *p;
	size_t		len = 128;		/* initial assumption about buffer size */
	size_t		cnt;

	for (;;)
	{
		va_list		args;

		/* Allocate work buffer. */
		p = (char *) pg_malloc(len);

		/*
		 * Try to format the data.  Restore errno first so that a %m
		 * directive in fmt reports the caller's errno, not one clobbered by
		 * pg_malloc or a previous loop iteration.
		 */
		errno = save_errno;
		va_start(args, fmt);
		cnt = pvsnprintf(p, len, fmt, args);
		va_end(args);

		if (cnt < len)
			break;				/* success */

		/* Release buffer and loop around to try again with larger len. */
		free(p);
		len = cnt;				/* pvsnprintf told us the size required */
	}

	WriteData(AH, p, cnt);
	free(p);
	return (int) cnt;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*******************************
|
|
|
|
* Stuff below here should be 'private' to the archiver routines
|
|
|
|
*******************************/
|
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
/*
 * Set up the archive's output stream (AH->OF).
 *
 * Destination selection: an explicit filename of "-" means stdout; a real
 * filename means open that file; with no filename, fall back to the
 * already-open archive FILE (AH->FH), then the archive file spec
 * (AH->fSpec), then stdout.  fn >= 0 marks "write via this file
 * descriptor"; fn == -1 marks "open by name".
 *
 * If compression != 0 (and zlib is available) the stream is a gzFile and
 * AH->gzOut is set so writers/closers use the GZ* macros.
 */
static void
SetOutput(ArchiveHandle *AH, const char *filename, int compression)
{
	int			fn;

	if (filename)
	{
		if (strcmp(filename, "-") == 0)
			fn = fileno(stdout);
		else
			fn = -1;
	}
	else if (AH->FH)
		fn = fileno(AH->FH);
	else if (AH->fSpec)
	{
		fn = -1;
		filename = AH->fSpec;
	}
	else
		fn = fileno(stdout);

	/* If compression explicitly requested, use gzopen */
#ifdef HAVE_LIBZ
	if (compression != 0)
	{
		char		fmode[14];

		/* Don't use PG_BINARY_x since this is zlib */
		sprintf(fmode, "wb%d", compression);
		/* dup() the fd so closing the gz stream doesn't close the original */
		if (fn >= 0)
			AH->OF = gzdopen(dup(fn), fmode);
		else
			AH->OF = gzopen(filename, fmode);
		AH->gzOut = 1;
	}
	else
#endif
	{							/* Use fopen */
		if (AH->mode == archModeAppend)
		{
			if (fn >= 0)
				AH->OF = fdopen(dup(fn), PG_BINARY_A);
			else
				AH->OF = fopen(filename, PG_BINARY_A);
		}
		else
		{
			if (fn >= 0)
				AH->OF = fdopen(dup(fn), PG_BINARY_W);
			else
				AH->OF = fopen(filename, PG_BINARY_W);
		}
		AH->gzOut = 0;
	}

	if (!AH->OF)
	{
		/* filename may still be NULL here (fd-based open failed) */
		if (filename)
			fatal("could not open output file \"%s\": %m", filename);
		else
			fatal("could not open output file: %m");
	}
}
|
|
|
|
|
|
|
|
static OutputContext
|
|
|
|
SaveOutput(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
OutputContext sav;
|
|
|
|
|
|
|
|
sav.OF = AH->OF;
|
|
|
|
sav.gzOut = AH->gzOut;
|
2001-01-12 05:32:07 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
return sav;
|
|
|
|
}
|
|
|
|
|
2006-07-18 19:42:01 +02:00
|
|
|
static void
|
2011-01-22 23:56:42 +01:00
|
|
|
RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2001-01-12 05:32:07 +01:00
|
|
|
int res;
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
if (AH->gzOut)
|
2001-01-12 05:32:07 +01:00
|
|
|
res = GZCLOSE(AH->OF);
|
2000-07-04 16:25:28 +02:00
|
|
|
else
|
2001-01-12 05:32:07 +01:00
|
|
|
res = fclose(AH->OF);
|
|
|
|
|
|
|
|
if (res != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not close output file: %m");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2011-01-22 23:56:42 +01:00
|
|
|
AH->gzOut = savedContext.gzOut;
|
|
|
|
AH->OF = savedContext.OF;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Print formatted text to the output file (usually stdout).
 *
 * Same grow-and-retry scheme as archprintf(), but delivers the result via
 * ahwrite() so it is routed to the currently selected sink (file, gzip
 * stream, custom output routine, or direct-to-DB).  Returns the number of
 * bytes written.
 */
int
ahprintf(ArchiveHandle *AH, const char *fmt,...)
{
	int			save_errno = errno;
	char	   *p;
	size_t		len = 128;		/* initial assumption about buffer size */
	size_t		cnt;

	for (;;)
	{
		va_list		args;

		/* Allocate work buffer. */
		p = (char *) pg_malloc(len);

		/*
		 * Try to format the data.  Restore errno so %m in fmt reflects the
		 * caller's errno rather than one set inside this loop.
		 */
		errno = save_errno;
		va_start(args, fmt);
		cnt = pvsnprintf(p, len, fmt, args);
		va_end(args);

		if (cnt < len)
			break;				/* success */

		/* Release buffer and loop around to try again with larger len. */
		free(p);
		len = cnt;				/* pvsnprintf reported the size needed */
	}

	ahwrite(p, 1, cnt, AH);
	free(p);
	return (int) cnt;
}
|
|
|
|
|
2000-07-24 08:24:26 +02:00
|
|
|
/*
|
|
|
|
* Single place for logic which says 'We are restoring to a direct DB connection'.
|
|
|
|
*/
|
2006-07-18 19:42:01 +02:00
|
|
|
static int
|
2000-07-24 08:24:26 +02:00
|
|
|
RestoringToDB(ArchiveHandle *AH)
|
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
|
|
|
return (ropt && ropt->useDB && AH->connection);
|
2000-07-24 08:24:26 +02:00
|
|
|
}
|
|
|
|
|
2005-06-21 22:45:44 +02:00
|
|
|
/*
 * Dump the current contents of the LO data buffer while writing a BLOB.
 *
 * Two delivery paths: with a live connection the buffer goes straight to
 * the server via lo_write(); otherwise it is emitted as a SQL
 * "SELECT pg_catalog.lowrite(0, <bytea literal>)" statement into the
 * script output.  In both cases the buffer is marked empty on return.
 */
static void
dump_lo_buf(ArchiveHandle *AH)
{
	if (AH->connection)
	{
		int			res;

		res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
		pg_log_debug(ngettext("wrote %zu byte of large object data (result = %d)",
							  "wrote %zu bytes of large object data (result = %d)",
							  AH->lo_buf_used),
					 AH->lo_buf_used, res);

		/* We assume there are no short writes, only errors */
		if (res != AH->lo_buf_used)
			warn_or_exit_horribly(AH, "could not write to large object: %s",
								  PQerrorMessage(AH->connection));
	}
	else
	{
		PQExpBuffer buf = createPQExpBuffer();

		/* Render the raw bytes as a bytea literal suitable for lowrite() */
		appendByteaLiteralAHX(buf,
							  (const unsigned char *) AH->lo_buf,
							  AH->lo_buf_used,
							  AH);

		/* Hack: turn off writingBlob so ahwrite doesn't recurse to here */
		AH->writingBlob = 0;
		ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
		AH->writingBlob = 1;

		destroyPQExpBuffer(buf);
	}
	AH->lo_buf_used = 0;
}
|
|
|
|
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/*
 * Write buffer to the output file (usually stdout).  This is used for
 * outputting 'restore' scripts etc.  It is even possible for an archive
 * format to create a custom output routine to 'fake' a restore if it
 * wants to generate a script (see TAR output).
 *
 * Sink selection, in priority order: the LO staging buffer (when a BLOB is
 * being written), the gzip stream, a format-specific CustomOutPtr, a
 * direct DB connection, or plain fwrite.  Any short write is fatal
 * (WRITE_ERROR_EXIT).
 */
void
ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
{
	int			bytes_written = 0;

	if (AH->writingBlob)
	{
		size_t		remaining = size * nmemb;

		/*
		 * Fill the LO buffer and flush via dump_lo_buf() as many times as
		 * needed; each flush resets lo_buf_used to 0.
		 */
		while (AH->lo_buf_used + remaining > AH->lo_buf_size)
		{
			size_t		avail = AH->lo_buf_size - AH->lo_buf_used;

			memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
			ptr = (const void *) ((const char *) ptr + avail);
			remaining -= avail;
			AH->lo_buf_used += avail;
			dump_lo_buf(AH);
		}

		/* Stash the (now buffer-sized-or-less) remainder for later */
		memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
		AH->lo_buf_used += remaining;

		/* Buffered data counts as written for the check below */
		bytes_written = size * nmemb;
	}
	else if (AH->gzOut)
		bytes_written = GZWRITE(ptr, size, nmemb, AH->OF);
	else if (AH->CustomOutPtr)
		bytes_written = AH->CustomOutPtr(AH, ptr, size * nmemb);

	else
	{
		/*
		 * If we're doing a restore, and it's direct to DB, and we're
		 * connected then send it to the DB.
		 */
		if (RestoringToDB(AH))
			bytes_written = ExecuteSqlCommandBuf(&AH->public, (const char *) ptr, size * nmemb);
		else
			/* fwrite returns item count; scale by size to get bytes */
			bytes_written = fwrite(ptr, size, nmemb, AH->OF) * size;
	}

	if (bytes_written != size * nmemb)
		WRITE_ERROR_EXIT;
}
|
|
|
|
|
2004-04-22 04:39:10 +02:00
|
|
|
/* on some error, we may decide to go on... */
/*
 * Report an error during archive processing, then either exit or keep going
 * depending on the user's --exit-on-error choice.
 *
 * Before printing the error itself (fmt + varargs, at PG_LOG_ERROR level),
 * emit contextual INFO lines identifying the restore stage and the current
 * TOC entry — but only when they differ from the stage/entry of the last
 * reported error, so repeated failures in the same place aren't noisy.
 *
 * If AH->public.exit_on_error is set, terminates via exit_nicely(1);
 * otherwise increments AH->public.n_errors and returns to the caller.
 */
void
warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
{
	va_list		ap;

	/* Announce the processing stage, once per stage change */
	switch (AH->stage)
	{
		case STAGE_NONE:
			/* Do nothing special */
			break;

		case STAGE_INITIALIZING:
			if (AH->stage != AH->lastErrorStage)
				pg_log_generic(PG_LOG_INFO, "while INITIALIZING:");
			break;

		case STAGE_PROCESSING:
			if (AH->stage != AH->lastErrorStage)
				pg_log_generic(PG_LOG_INFO, "while PROCESSING TOC:");
			break;

		case STAGE_FINALIZING:
			if (AH->stage != AH->lastErrorStage)
				pg_log_generic(PG_LOG_INFO, "while FINALIZING:");
			break;
	}
	/* Identify the TOC entry being worked on, once per entry change */
	if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
	{
		pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
					   AH->currentTE->dumpId,
					   AH->currentTE->catalogId.tableoid,
					   AH->currentTE->catalogId.oid,
					   AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
					   AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
					   AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
	}
	/* Remember where this error occurred, to suppress duplicate context */
	AH->lastErrorStage = AH->stage;
	AH->lastErrorTE = AH->currentTE;

	/* Now print the caller-supplied message */
	va_start(ap, fmt);
	pg_log_generic_v(PG_LOG_ERROR, fmt, ap);
	va_end(ap);

	if (AH->public.exit_on_error)
		exit_nicely(1);
	else
		AH->public.n_errors++;
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2010-08-21 15:59:44 +02:00
|
|
|
#ifdef NOT_USED

/*
 * Relocate TOC entry "te" so it immediately follows "pos" in the circular
 * doubly-linked TOC list.  Both entries must already be members of the list.
 */
static void
_moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
{
	/* Detach "te" from wherever it currently sits */
	te->next->prev = te->prev;
	te->prev->next = te->next;

	/* Splice it back in, directly after "pos" */
	te->next = pos->next;
	te->prev = pos;
	pos->next->prev = te;
	pos->next = te;
}
#endif
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
static void
|
2020-08-25 07:24:15 +02:00
|
|
|
_moveBefore(TocEntry *pos, TocEntry *te)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2010-08-21 15:59:44 +02:00
|
|
|
/* Unlink te from list */
|
2000-07-04 16:25:28 +02:00
|
|
|
te->prev->next = te->next;
|
|
|
|
te->next->prev = te->prev;
|
|
|
|
|
2010-08-21 15:59:44 +02:00
|
|
|
/* and insert it before "pos" */
|
2000-07-04 16:25:28 +02:00
|
|
|
te->prev = pos->prev;
|
|
|
|
te->next = pos;
|
|
|
|
pos->prev->next = te;
|
|
|
|
pos->prev = te;
|
|
|
|
}
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
/*
|
|
|
|
* Build index arrays for the TOC list
|
|
|
|
*
|
|
|
|
* This should be invoked only after we have created or read in all the TOC
|
|
|
|
* items.
|
|
|
|
*
|
|
|
|
* The arrays are indexed by dump ID (so entry zero is unused). Note that the
|
|
|
|
* array entries run only up to maxDumpId. We might see dependency dump IDs
|
|
|
|
* beyond that (if the dump was partial); so always check the array bound
|
|
|
|
* before trying to touch an array entry.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
buildTocEntryArrays(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
DumpId maxDumpId = AH->maxDumpId;
|
2000-07-04 16:25:28 +02:00
|
|
|
TocEntry *te;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
AH->tocsByDumpId = (TocEntry **) pg_malloc0((maxDumpId + 1) * sizeof(TocEntry *));
|
|
|
|
AH->tableDataId = (DumpId *) pg_malloc0((maxDumpId + 1) * sizeof(DumpId));
|
2012-05-29 02:38:28 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
/* this check is purely paranoia, maxDumpId should be correct */
|
|
|
|
if (te->dumpId <= 0 || te->dumpId > maxDumpId)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("bad dumpId");
|
2012-05-29 02:38:28 +02:00
|
|
|
|
|
|
|
/* tocsByDumpId indexes all TOCs by their dump ID */
|
|
|
|
AH->tocsByDumpId[te->dumpId] = te;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* tableDataId provides the TABLE DATA item's dump ID for each TABLE
|
|
|
|
* TOC entry that has a DATA item. We compute this by reversing the
|
|
|
|
* TABLE DATA item's dependency, knowing that a TABLE DATA item has
|
|
|
|
* just one dependency and it is the TABLE item.
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0)
|
|
|
|
{
|
|
|
|
DumpId tableId = te->dependencies[0];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The TABLE item might not have been in the archive, if this was
|
|
|
|
* a data-only dump; but its dump ID should be less than its data
|
|
|
|
* item's dump ID, so there should be a place for it in the array.
|
|
|
|
*/
|
|
|
|
if (tableId <= 0 || tableId > maxDumpId)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("bad table dumpId for TABLE DATA item");
|
2012-05-29 02:38:28 +02:00
|
|
|
|
|
|
|
AH->tableDataId[tableId] = te->dumpId;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
2012-05-29 02:38:28 +02:00
|
|
|
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
TocEntry *
|
2012-05-29 02:38:28 +02:00
|
|
|
getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
|
|
|
|
{
|
|
|
|
/* build index arrays if we didn't already */
|
|
|
|
if (AH->tocsByDumpId == NULL)
|
|
|
|
buildTocEntryArrays(AH);
|
|
|
|
|
|
|
|
if (id > 0 && id <= AH->maxDumpId)
|
|
|
|
return AH->tocsByDumpId[id];
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-12-11 19:15:30 +01:00
|
|
|
int
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
TocIDRequired(ArchiveHandle *AH, DumpId id)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2003-12-06 04:00:16 +01:00
|
|
|
TocEntry *te = getTocEntryByDumpId(AH, id);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
if (!te)
|
|
|
|
return 0;
|
|
|
|
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
return te->reqs;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
size_t
|
2007-02-19 16:05:06 +01:00
|
|
|
WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet)
|
2002-10-22 21:15:23 +02:00
|
|
|
{
|
|
|
|
int off;
|
|
|
|
|
|
|
|
/* Save the flag */
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteBytePtr(AH, wasSet);
|
2002-10-22 21:15:23 +02:00
|
|
|
|
2007-02-19 16:05:06 +01:00
|
|
|
/* Write out pgoff_t smallest byte first, prevents endian mismatch */
|
|
|
|
for (off = 0; off < sizeof(pgoff_t); off++)
|
2002-10-22 21:15:23 +02:00
|
|
|
{
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteBytePtr(AH, o & 0xFF);
|
2002-10-22 21:15:23 +02:00
|
|
|
o >>= 8;
|
|
|
|
}
|
2007-02-19 16:05:06 +01:00
|
|
|
return sizeof(pgoff_t) + 1;
|
2002-10-22 21:15:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Read a file offset from the archive into *o.
 *
 * Returns one of K_OFFSET_POS_NOT_SET, K_OFFSET_NO_DATA, or
 * K_OFFSET_POS_SET describing the state of the stored offset.
 * Dies via fatal() if the stored flag byte is unrecognized or the
 * stored offset does not fit in this platform's pgoff_t.
 */
int
ReadOffset(ArchiveHandle *AH, pgoff_t * o)
{
	int			i;
	int			off;
	int			offsetFlg;

	/* Initialize to zero */
	*o = 0;

	/* Check for old version */
	if (AH->version < K_VERS_1_7)
	{
		/* Prior versions wrote offsets using WriteInt */
		i = ReadInt(AH);
		/* -1 means not set */
		if (i < 0)
			return K_OFFSET_POS_NOT_SET;
		else if (i == 0)
			return K_OFFSET_NO_DATA;

		/* Cast to pgoff_t because it was written as an int. */
		*o = (pgoff_t) i;
		return K_OFFSET_POS_SET;
	}

	/*
	 * Read the flag indicating the state of the data pointer. Check if valid
	 * and die if not.
	 *
	 * This used to be handled by a negative or zero pointer, now we use an
	 * extra byte specifically for the state.
	 */
	offsetFlg = AH->ReadBytePtr(AH) & 0xFF;

	switch (offsetFlg)
	{
		case K_OFFSET_POS_NOT_SET:
		case K_OFFSET_NO_DATA:
		case K_OFFSET_POS_SET:
			/* Recognized flag values; fall through to read the bytes. */
			break;

		default:
			fatal("unexpected data offset flag %d", offsetFlg);
	}

	/*
	 * Read the bytes.  The archive records AH->offSize bytes (the writing
	 * platform's offset width); bytes beyond this platform's pgoff_t must
	 * all be zero or the offset is unrepresentable here.
	 */
	for (off = 0; off < AH->offSize; off++)
	{
		if (off < sizeof(pgoff_t))
			*o |= ((pgoff_t) (AH->ReadBytePtr(AH))) << (off * 8);
		else
		{
			if (AH->ReadBytePtr(AH) != 0)
				fatal("file offset in dump file is too large");
		}
	}

	return offsetFlg;
}
|
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t
|
2000-07-04 16:25:28 +02:00
|
|
|
WriteInt(ArchiveHandle *AH, int i)
|
|
|
|
{
|
|
|
|
int b;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/*
|
|
|
|
* This is a bit yucky, but I don't want to make the binary format very
|
2002-11-23 04:59:09 +01:00
|
|
|
* dependent on representation, and not knowing much about it, I write out
|
2000-07-04 16:25:28 +02:00
|
|
|
* a sign byte. If you change this, don't forget to change the file
|
2019-08-13 06:53:41 +02:00
|
|
|
* version #, and modify ReadInt to read the new format AS WELL AS the old
|
2000-07-04 16:25:28 +02:00
|
|
|
* formats.
|
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/* SIGN byte */
|
|
|
|
if (i < 0)
|
|
|
|
{
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteBytePtr(AH, 1);
|
2000-07-21 13:40:08 +02:00
|
|
|
i = -i;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
else
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteBytePtr(AH, 0);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
for (b = 0; b < AH->intSize; b++)
|
|
|
|
{
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteBytePtr(AH, i & 0xFF);
|
2002-05-29 03:38:56 +02:00
|
|
|
i >>= 8;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
return AH->intSize + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ReadInt(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
int res = 0;
|
|
|
|
int bv,
|
|
|
|
b;
|
|
|
|
int sign = 0; /* Default positive */
|
2000-07-21 13:40:08 +02:00
|
|
|
int bitShift = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
if (AH->version > K_VERS_1_0)
|
2000-07-21 13:40:08 +02:00
|
|
|
/* Read a sign byte */
|
2017-09-07 18:06:23 +02:00
|
|
|
sign = AH->ReadBytePtr(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
for (b = 0; b < AH->intSize; b++)
|
|
|
|
{
|
2017-09-07 18:06:23 +02:00
|
|
|
bv = AH->ReadBytePtr(AH) & 0xFF;
|
2000-07-21 13:40:08 +02:00
|
|
|
if (bv != 0)
|
|
|
|
res = res + (bv << bitShift);
|
|
|
|
bitShift += 8;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sign)
|
2000-07-21 13:40:08 +02:00
|
|
|
res = -res;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t
|
2001-04-01 07:42:51 +02:00
|
|
|
WriteStr(ArchiveHandle *AH, const char *c)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t res;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
if (c)
|
|
|
|
{
|
2014-05-06 02:27:16 +02:00
|
|
|
int len = strlen(c);
|
2014-05-06 18:12:18 +02:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
res = WriteInt(AH, len);
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->WriteBufPtr(AH, c, len);
|
2014-05-06 02:27:16 +02:00
|
|
|
res += len;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
res = WriteInt(AH, -1);
|
|
|
|
|
|
|
|
return res;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
char *
|
|
|
|
ReadStr(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
char *buf;
|
|
|
|
int l;
|
|
|
|
|
|
|
|
l = ReadInt(AH);
|
2007-08-06 03:38:15 +02:00
|
|
|
if (l < 0)
|
2000-07-21 13:40:08 +02:00
|
|
|
buf = NULL;
|
|
|
|
else
|
|
|
|
{
|
2011-11-25 21:40:51 +01:00
|
|
|
buf = (char *) pg_malloc(l + 1);
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ReadBufPtr(AH, (void *) buf, l);
|
2007-08-06 03:38:15 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
buf[l] = '\0';
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2000-12-07 03:52:27 +01:00
|
|
|
static int
|
|
|
|
_discoverArchiveFormat(ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
|
|
|
FILE *fh;
|
|
|
|
char sig[6]; /* More than enough */
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t cnt;
|
2000-07-04 16:25:28 +02:00
|
|
|
int wantClose = 0;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("attempting to ascertain archive format");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
if (AH->lookahead)
|
|
|
|
free(AH->lookahead);
|
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
AH->readHeader = 0;
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->lookaheadSize = 512;
|
2012-10-02 21:35:10 +02:00
|
|
|
AH->lookahead = pg_malloc0(512);
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->lookaheadLen = 0;
|
|
|
|
AH->lookaheadPos = 0;
|
2000-07-06 20:39:39 +02:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
if (AH->fSpec)
|
|
|
|
{
|
2011-01-23 22:10:15 +01:00
|
|
|
struct stat st;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
wantClose = 1;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if the specified archive is a directory. If so, check if
|
|
|
|
* there's a "toc.dat" (or "toc.dat.gz") file in it.
|
|
|
|
*/
|
|
|
|
if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
|
|
|
|
{
|
|
|
|
char buf[MAXPGPATH];
|
2011-04-10 17:42:00 +02:00
|
|
|
|
2011-01-23 22:10:15 +01:00
|
|
|
if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("directory name too long: \"%s\"",
|
2012-03-20 22:38:11 +01:00
|
|
|
AH->fSpec);
|
2011-01-23 22:10:15 +01:00
|
|
|
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
|
|
|
|
{
|
|
|
|
AH->format = archDirectory;
|
|
|
|
return AH->format;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef HAVE_LIBZ
|
|
|
|
if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("directory name too long: \"%s\"",
|
2012-03-20 22:38:11 +01:00
|
|
|
AH->fSpec);
|
2011-01-23 22:10:15 +01:00
|
|
|
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
|
|
|
|
{
|
|
|
|
AH->format = archDirectory;
|
|
|
|
return AH->format;
|
|
|
|
}
|
|
|
|
#endif
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
|
2012-03-20 22:38:11 +01:00
|
|
|
AH->fSpec);
|
2011-01-24 07:28:00 +01:00
|
|
|
fh = NULL; /* keep compiler quiet */
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
fh = fopen(AH->fSpec, PG_BINARY_R);
|
|
|
|
if (!fh)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not open input file \"%s\": %m", AH->fSpec);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
else
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
fh = stdin;
|
2007-10-28 22:55:52 +01:00
|
|
|
if (!fh)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not open input file: %m");
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
if ((cnt = fread(sig, 1, 5, fh)) != 5)
|
2001-06-27 23:21:37 +02:00
|
|
|
{
|
|
|
|
if (ferror(fh))
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not read input file: %m");
|
2001-06-27 23:21:37 +02:00
|
|
|
else
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("input file is too short (read %lu, expected 5)",
|
2012-03-20 22:38:11 +01:00
|
|
|
(unsigned long) cnt);
|
2001-06-27 23:21:37 +02:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/* Save it, just in case we need it later */
|
Replace a bunch more uses of strncpy() with safer coding.
strncpy() has a well-deserved reputation for being unsafe, so make an
effort to get rid of nearly all occurrences in HEAD.
A large fraction of the remaining uses were passing length less than or
equal to the known strlen() of the source, in which case no null-padding
can occur and the behavior is equivalent to memcpy(), though doubtless
slower and certainly harder to reason about. So just use memcpy() in
these cases.
In other cases, use either StrNCpy() or strlcpy() as appropriate (depending
on whether padding to the full length of the destination buffer seems
useful).
I left a few strncpy() calls alone in the src/timezone/ code, to keep it
in sync with upstream (the IANA tzcode distribution). There are also a
few such calls in ecpg that could possibly do with more analysis.
AFAICT, none of these changes are more than cosmetic, except for the four
occurrences in fe-secure-openssl.c, which are in fact buggy: an overlength
source leads to a non-null-terminated destination buffer and ensuing
misbehavior. These don't seem like security issues, first because no stack
clobber is possible and second because if your values of sslcert etc are
coming from untrusted sources then you've got problems way worse than this.
Still, it's undesirable to have unpredictable behavior for overlength
inputs, so back-patch those four changes to all active branches.
2015-01-24 19:05:42 +01:00
|
|
|
memcpy(&AH->lookahead[0], sig, 5);
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->lookaheadLen = 5;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (strncmp(sig, "PGDMP", 5) == 0)
|
|
|
|
{
|
2021-04-01 19:34:16 +02:00
|
|
|
/* It's custom format, stop here */
|
|
|
|
AH->format = archCustom;
|
|
|
|
AH->readHeader = 1;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2012-01-03 22:02:49 +01:00
|
|
|
* *Maybe* we have a tar archive format file or a text dump ... So,
|
|
|
|
* read first 512 byte header...
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
|
2014-05-06 02:27:16 +02:00
|
|
|
/* read failure is checked below */
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->lookaheadLen += cnt;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2012-01-03 22:02:49 +01:00
|
|
|
if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
|
|
|
|
(strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
|
|
|
|
strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* looks like it's probably a text format dump. so suggest they
|
|
|
|
* try psql
|
|
|
|
*/
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("input file appears to be a text format dump. Please use psql.");
|
2012-01-03 22:02:49 +01:00
|
|
|
}
|
|
|
|
|
2014-05-06 16:00:57 +02:00
|
|
|
if (AH->lookaheadLen != 512)
|
|
|
|
{
|
|
|
|
if (feof(fh))
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("input file does not appear to be a valid archive (too short?)");
|
2014-05-06 16:00:57 +02:00
|
|
|
else
|
|
|
|
READ_ERROR_EXIT(fh);
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (!isValidTarHeader(AH->lookahead))
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("input file does not appear to be a valid archive");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->format = archTar;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
/* Close the file if we opened it */
|
2000-07-04 16:25:28 +02:00
|
|
|
if (wantClose)
|
2021-04-01 19:34:16 +02:00
|
|
|
{
|
2001-01-12 05:32:07 +01:00
|
|
|
if (fclose(fh) != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not close input file: %m");
|
2021-04-01 19:34:16 +02:00
|
|
|
/* Forget lookahead, since we'll re-read header after re-opening */
|
|
|
|
AH->readHeader = 0;
|
|
|
|
AH->lookaheadLen = 0;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
return AH->format;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate an archive handle
|
|
|
|
*/
|
2000-07-21 13:40:08 +02:00
|
|
|
static ArchiveHandle *
|
|
|
|
_allocAH(const char *FileSpec, const ArchiveFormat fmt,
|
2017-03-22 15:00:30 +01:00
|
|
|
const int compression, bool dosync, ArchiveMode mode,
|
2016-08-30 18:00:00 +02:00
|
|
|
SetupWorkerPtrType setupWorkerPtr)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2000-07-04 16:25:28 +02:00
|
|
|
ArchiveHandle *AH;
|
|
|
|
|
2020-09-17 18:52:18 +02:00
|
|
|
pg_log_debug("allocating AH for %s, format %d",
|
|
|
|
FileSpec ? FileSpec : "(stdio)", fmt);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle));
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2016-10-25 18:00:00 +02:00
|
|
|
AH->version = K_VERS_SELF;
|
2010-02-18 02:29:10 +01:00
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
/* initialize for backwards compatible string processing */
|
2007-10-13 22:18:42 +02:00
|
|
|
AH->public.encoding = 0; /* PG_SQL_ASCII */
|
2006-05-28 23:13:54 +02:00
|
|
|
AH->public.std_strings = false;
|
|
|
|
|
|
|
|
/* sql error handling */
|
|
|
|
AH->public.exit_on_error = true;
|
|
|
|
AH->public.n_errors = 0;
|
|
|
|
|
2010-02-24 03:42:55 +01:00
|
|
|
AH->archiveDumpVersion = PG_VERSION;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->createDate = time(NULL);
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
AH->intSize = sizeof(int);
|
2007-02-19 16:05:06 +01:00
|
|
|
AH->offSize = sizeof(pgoff_t);
|
2000-07-04 16:25:28 +02:00
|
|
|
if (FileSpec)
|
|
|
|
{
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->fSpec = pg_strdup(FileSpec);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Not used; maybe later....
|
|
|
|
*
|
2011-11-25 21:40:51 +01:00
|
|
|
* AH->workDir = pg_strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
|
2000-07-21 13:40:08 +02:00
|
|
|
* i--) if (AH->workDir[i-1] == '/')
|
|
|
|
*/
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
else
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->fSpec = NULL;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currUser = NULL; /* unknown */
|
|
|
|
AH->currSchema = NULL; /* ditto */
|
|
|
|
AH->currTablespace = NULL; /* ditto */
|
2019-03-06 18:54:38 +01:00
|
|
|
AH->currTableAm = NULL; /* ditto */
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
AH->toc->next = AH->toc;
|
|
|
|
AH->toc->prev = AH->toc;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
AH->mode = mode;
|
|
|
|
AH->compression = compression;
|
2017-03-22 15:00:30 +01:00
|
|
|
AH->dosync = dosync;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse));
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/* Open stdout with no compression for AH output handle */
|
|
|
|
AH->gzOut = 0;
|
|
|
|
AH->OF = stdout;
|
|
|
|
|
2005-01-26 20:44:43 +01:00
|
|
|
/*
|
2017-06-19 17:02:45 +02:00
|
|
|
* On Windows, we need to use binary mode to read/write non-text files,
|
|
|
|
* which include all archive formats as well as compressed plain text.
|
|
|
|
* Force stdin/stdout into binary mode if that is what we are using.
|
2005-01-26 20:44:43 +01:00
|
|
|
*/
|
|
|
|
#ifdef WIN32
|
2017-06-19 17:02:45 +02:00
|
|
|
if ((fmt != archNull || compression != 0) &&
|
2005-09-11 02:36:14 +02:00
|
|
|
(AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0))
|
2005-01-26 20:44:43 +01:00
|
|
|
{
|
|
|
|
if (mode == archModeWrite)
|
2017-07-14 15:02:53 +02:00
|
|
|
_setmode(fileno(stdout), O_BINARY);
|
2005-01-26 20:44:43 +01:00
|
|
|
else
|
2017-07-14 15:02:53 +02:00
|
|
|
_setmode(fileno(stdin), O_BINARY);
|
2005-01-26 20:44:43 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->SetupWorkerPtr = setupWorkerPtr;
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
if (fmt == archUnknown)
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->format = _discoverArchiveFormat(AH);
|
|
|
|
else
|
|
|
|
AH->format = fmt;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
switch (AH->format)
|
|
|
|
{
|
|
|
|
case archCustom:
|
|
|
|
InitArchiveFmt_Custom(AH);
|
|
|
|
break;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
case archNull:
|
|
|
|
InitArchiveFmt_Null(AH);
|
|
|
|
break;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2011-01-23 22:10:15 +01:00
|
|
|
case archDirectory:
|
|
|
|
InitArchiveFmt_Directory(AH);
|
|
|
|
break;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
case archTar:
|
|
|
|
InitArchiveFmt_Tar(AH);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("unrecognized file format \"%d\"", fmt);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return AH;
|
|
|
|
}
|
|
|
|
|
Lots of comment-fixing, and minor cosmetic cleanup, in pg_dump/parallel.c.
The commentary in this file was in extremely sad shape. The author(s)
had clearly never heard of the project convention that a function header
comment should provide an API spec of some sort for that function. Much
of it was flat out wrong, too --- maybe it was accurate when written, but
if so it had not been updated to track subsequent code revisions. Rewrite
and rearrange to try to bring it up to speed, and annotate some of the
places where more work is needed. (I've refrained from actually fixing
anything of substance ... yet.)
Also, rename a couple of functions for more clarity as to what they do,
do some very minor code rearrangement, remove some pointless Asserts,
fix an incorrect Assert in readMessageFromPipe, and add a missing socket
close in one error exit from pgpipe(). The last would be a bug if we
tried to continue after pgpipe() failure, but since we don't, it's just
cosmetic at present.
Although this is only cosmetic, back-patch to 9.3 where parallel.c was
added. It's sufficiently invasive that it'll pose a hazard for future
back-patching if we don't.
Discussion: <25239.1464386067@sss.pgh.pa.us>
2016-05-28 20:02:11 +02:00
|
|
|
/*
|
|
|
|
* Write out all data (tables & blobs)
|
|
|
|
*/
|
2000-07-04 16:25:28 +02:00
|
|
|
void
|
2016-01-13 23:48:33 +01:00
|
|
|
WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2009-02-02 21:07:37 +01:00
|
|
|
TocEntry *te;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
if (pstate && pstate->numWorkers > 1)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/*
|
2020-06-14 23:22:47 +02:00
|
|
|
* In parallel mode, this code runs in the leader process. We
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
* construct an array of candidate TEs, then sort it into decreasing
|
|
|
|
* size order, then dispatch each TE to a data-transfer worker. By
|
|
|
|
* dumping larger tables first, we avoid getting into a situation
|
|
|
|
* where we're down to one job and it's big, losing parallelism.
|
|
|
|
*/
|
|
|
|
TocEntry **tes;
|
|
|
|
int ntes;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
tes = (TocEntry **) pg_malloc(AH->tocCount * sizeof(TocEntry *));
|
|
|
|
ntes = 0;
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* Consider only TEs with dataDumper functions ... */
|
|
|
|
if (!te->dataDumper)
|
|
|
|
continue;
|
|
|
|
/* ... and ignore ones not enabled for dump */
|
|
|
|
if ((te->reqs & REQ_DATA) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
tes[ntes++] = te;
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
Lots of comment-fixing, and minor cosmetic cleanup, in pg_dump/parallel.c.
The commentary in this file was in extremely sad shape. The author(s)
had clearly never heard of the project convention that a function header
comment should provide an API spec of some sort for that function. Much
of it was flat out wrong, too --- maybe it was accurate when written, but
if so it had not been updated to track subsequent code revisions. Rewrite
and rearrange to try to bring it up to speed, and annotate some of the
places where more work is needed. (I've refrained from actually fixing
anything of substance ... yet.)
Also, rename a couple of functions for more clarity as to what they do,
do some very minor code rearrangement, remove some pointless Asserts,
fix an incorrect Assert in readMessageFromPipe, and add a missing socket
close in one error exit from pgpipe(). The last would be a bug if we
tried to continue after pgpipe() failure, but since we don't, it's just
cosmetic at present.
Although this is only cosmetic, back-patch to 9.3 where parallel.c was
added. It's sufficiently invasive that it'll pose a hazard for future
back-patching if we don't.
Discussion: <25239.1464386067@sss.pgh.pa.us>
2016-05-28 20:02:11 +02:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
if (ntes > 1)
|
|
|
|
qsort((void *) tes, ntes, sizeof(TocEntry *),
|
|
|
|
TocEntrySizeCompare);
|
|
|
|
|
|
|
|
for (int i = 0; i < ntes; i++)
|
|
|
|
DispatchJobForTocEntry(AH, pstate, tes[i], ACT_DUMP,
|
|
|
|
mark_dump_job_done, NULL);
|
|
|
|
|
|
|
|
pg_free(tes);
|
|
|
|
|
|
|
|
/* Now wait for workers to finish. */
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
WaitForWorkers(AH, pstate, WFW_ALL_IDLE);
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Non-parallel mode: just dump all candidate TEs sequentially. */
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
/* Must have same filter conditions as above */
|
|
|
|
if (!te->dataDumper)
|
|
|
|
continue;
|
|
|
|
if ((te->reqs & REQ_DATA) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
WriteDataChunksForTocEntry(AH, te);
|
|
|
|
}
|
|
|
|
}
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
|
|
|
|
/*
|
2020-06-14 23:22:47 +02:00
|
|
|
* Callback function that's invoked in the leader process after a step has
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
* been parallel dumped.
|
|
|
|
*
|
|
|
|
* We don't need to do anything except check for worker failure.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
mark_dump_job_done(ArchiveHandle *AH,
|
|
|
|
TocEntry *te,
|
|
|
|
int status,
|
|
|
|
void *callback_data)
|
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("finished item %d %s %s",
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
te->dumpId, te->desc, te->tag);
|
|
|
|
|
|
|
|
if (status != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("worker process failed: exit code %d",
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
status);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
void
|
2016-01-13 23:48:33 +01:00
|
|
|
WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
2016-08-30 18:00:00 +02:00
|
|
|
StartDataPtrType startPtr;
|
|
|
|
EndDataPtrType endPtr;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->currToc = te;
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0)
|
|
|
|
{
|
|
|
|
startPtr = AH->StartBlobsPtr;
|
|
|
|
endPtr = AH->EndBlobsPtr;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
2013-03-24 16:27:20 +01:00
|
|
|
else
|
|
|
|
{
|
|
|
|
startPtr = AH->StartDataPtr;
|
|
|
|
endPtr = AH->EndDataPtr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (startPtr != NULL)
|
|
|
|
(*startPtr) (AH, te);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The user-provided DataDumper routine needs to call AH->WriteData
|
|
|
|
*/
|
2017-09-07 18:06:23 +02:00
|
|
|
te->dataDumper((Archive *) AH, te->dataDumperArg);
|
2013-03-24 16:27:20 +01:00
|
|
|
|
|
|
|
if (endPtr != NULL)
|
|
|
|
(*endPtr) (AH, te);
|
|
|
|
|
|
|
|
AH->currToc = NULL;
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * WriteToc
 *		Write the archive's table of contents to the output.
 *
 * First emits the count of entries that will actually be dumped, then one
 * record per dumpable entry.  The exact field order written here defines
 * the archive TOC format and must match what ReadToc expects; do not
 * reorder these writes.
 */
void
WriteToc(ArchiveHandle *AH)
{
	TocEntry   *te;
	char		workbuf[32];	/* scratch space for integer-to-string */
	int			tocCount;
	int			i;

	/* count entries that will actually be dumped */
	tocCount = 0;
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) != 0)
			tocCount++;
	}

	/* printf("%d TOC Entries to save\n", tocCount); */
	WriteInt(AH, tocCount);

	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		/* skip entries filtered out above; keep count and records in sync */
		if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) == 0)
			continue;

		WriteInt(AH, te->dumpId);
		/* flag telling the reader whether a data segment follows later */
		WriteInt(AH, te->dataDumper ? 1 : 0);

		/* OID is recorded as a string for historical reasons */
		sprintf(workbuf, "%u", te->catalogId.tableoid);
		WriteStr(AH, workbuf);
		sprintf(workbuf, "%u", te->catalogId.oid);
		WriteStr(AH, workbuf);

		WriteStr(AH, te->tag);
		WriteStr(AH, te->desc);
		WriteInt(AH, te->section);
		WriteStr(AH, te->defn);
		WriteStr(AH, te->dropStmt);
		WriteStr(AH, te->copyStmt);
		WriteStr(AH, te->namespace);
		WriteStr(AH, te->tablespace);
		WriteStr(AH, te->tableam);
		WriteStr(AH, te->owner);
		/* legacy "table has OIDs" slot; always false since WITH OIDS removal */
		WriteStr(AH, "false");

		/* Dump list of dependencies */
		for (i = 0; i < te->nDeps; i++)
		{
			sprintf(workbuf, "%d", te->dependencies[i]);
			WriteStr(AH, workbuf);
		}
		WriteStr(AH, NULL);		/* Terminate List */

		/* let the archive format append its own per-entry data, if any */
		if (AH->WriteExtraTocPtr)
			AH->WriteExtraTocPtr(AH, te);
	}
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ReadToc(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
int i;
|
2003-12-06 04:00:16 +01:00
|
|
|
char *tmp;
|
|
|
|
DumpId *deps;
|
2001-04-01 07:42:51 +02:00
|
|
|
int depIdx;
|
|
|
|
int depSize;
|
2009-02-02 21:07:37 +01:00
|
|
|
TocEntry *te;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
AH->tocCount = ReadInt(AH);
|
2003-12-06 04:00:16 +01:00
|
|
|
AH->maxDumpId = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
for (i = 0; i < AH->tocCount; i++)
|
|
|
|
{
|
2012-10-02 21:35:10 +02:00
|
|
|
te = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
2003-12-06 04:00:16 +01:00
|
|
|
te->dumpId = ReadInt(AH);
|
|
|
|
|
|
|
|
if (te->dumpId > AH->maxDumpId)
|
|
|
|
AH->maxDumpId = te->dumpId;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
/* Sanity check */
|
2003-12-06 04:00:16 +01:00
|
|
|
if (te->dumpId <= 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("entry ID %d out of range -- perhaps a corrupt TOC",
|
2012-03-20 22:38:11 +01:00
|
|
|
te->dumpId);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
te->hadDumper = ReadInt(AH);
|
2003-12-06 04:00:16 +01:00
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_8)
|
|
|
|
{
|
|
|
|
tmp = ReadStr(AH);
|
|
|
|
sscanf(tmp, "%u", &te->catalogId.tableoid);
|
|
|
|
free(tmp);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
te->catalogId.tableoid = InvalidOid;
|
|
|
|
tmp = ReadStr(AH);
|
|
|
|
sscanf(tmp, "%u", &te->catalogId.oid);
|
|
|
|
free(tmp);
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2002-07-04 17:35:07 +02:00
|
|
|
te->tag = ReadStr(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
te->desc = ReadStr(AH);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_11)
|
|
|
|
{
|
|
|
|
te->section = ReadInt(AH);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2010-02-18 02:29:10 +01:00
|
|
|
* Rules for pre-8.4 archives wherein pg_dump hasn't classified
|
|
|
|
* the entries into sections. This list need not cover entry
|
|
|
|
* types added later than 8.4.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "COMMENT") == 0 ||
|
2009-10-05 21:24:49 +02:00
|
|
|
strcmp(te->desc, "ACL") == 0 ||
|
2010-02-18 02:29:10 +01:00
|
|
|
strcmp(te->desc, "ACL LANGUAGE") == 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
te->section = SECTION_NONE;
|
|
|
|
else if (strcmp(te->desc, "TABLE DATA") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOBS") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
|
|
te->section = SECTION_DATA;
|
|
|
|
else if (strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
|
|
|
strcmp(te->desc, "INDEX") == 0 ||
|
|
|
|
strcmp(te->desc, "RULE") == 0 ||
|
|
|
|
strcmp(te->desc, "TRIGGER") == 0)
|
|
|
|
te->section = SECTION_POST_DATA;
|
|
|
|
else
|
|
|
|
te->section = SECTION_PRE_DATA;
|
|
|
|
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
te->defn = ReadStr(AH);
|
|
|
|
te->dropStmt = ReadStr(AH);
|
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_3)
|
|
|
|
te->copyStmt = ReadStr(AH);
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (AH->version >= K_VERS_1_6)
|
|
|
|
te->namespace = ReadStr(AH);
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
if (AH->version >= K_VERS_1_10)
|
|
|
|
te->tablespace = ReadStr(AH);
|
|
|
|
|
2019-03-06 18:54:38 +01:00
|
|
|
if (AH->version >= K_VERS_1_14)
|
|
|
|
te->tableam = ReadStr(AH);
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
te->owner = ReadStr(AH);
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
if (AH->version < K_VERS_1_9 || strcmp(ReadStr(AH), "true") == 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("restoring tables WITH OIDS is not supported anymore");
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2001-04-01 07:42:51 +02:00
|
|
|
/* Read TOC entry dependencies */
|
|
|
|
if (AH->version >= K_VERS_1_5)
|
|
|
|
{
|
|
|
|
depSize = 100;
|
2011-11-25 21:40:51 +01:00
|
|
|
deps = (DumpId *) pg_malloc(sizeof(DumpId) * depSize);
|
2001-04-01 07:42:51 +02:00
|
|
|
depIdx = 0;
|
2003-12-06 04:00:16 +01:00
|
|
|
for (;;)
|
2001-04-01 07:42:51 +02:00
|
|
|
{
|
2003-12-06 04:00:16 +01:00
|
|
|
tmp = ReadStr(AH);
|
|
|
|
if (!tmp)
|
|
|
|
break; /* end of list */
|
2003-05-04 00:18:59 +02:00
|
|
|
if (depIdx >= depSize)
|
2001-04-01 07:42:51 +02:00
|
|
|
{
|
|
|
|
depSize *= 2;
|
2011-11-30 02:41:06 +01:00
|
|
|
deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depSize);
|
2001-04-01 07:42:51 +02:00
|
|
|
}
|
2003-12-06 04:00:16 +01:00
|
|
|
sscanf(tmp, "%d", &deps[depIdx]);
|
|
|
|
free(tmp);
|
|
|
|
depIdx++;
|
|
|
|
}
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2003-12-06 04:00:16 +01:00
|
|
|
if (depIdx > 0) /* We have a non-null entry */
|
|
|
|
{
|
2011-11-30 02:41:06 +01:00
|
|
|
deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depIdx);
|
2003-12-06 04:00:16 +01:00
|
|
|
te->dependencies = deps;
|
|
|
|
te->nDeps = depIdx;
|
|
|
|
}
|
2001-06-27 23:21:37 +02:00
|
|
|
else
|
2003-05-04 00:18:59 +02:00
|
|
|
{
|
|
|
|
free(deps);
|
2003-12-06 04:00:16 +01:00
|
|
|
te->dependencies = NULL;
|
|
|
|
te->nDeps = 0;
|
2003-05-04 00:18:59 +02:00
|
|
|
}
|
2001-04-01 07:42:51 +02:00
|
|
|
}
|
2001-06-27 23:21:37 +02:00
|
|
|
else
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
|
|
|
te->dependencies = NULL;
|
|
|
|
te->nDeps = 0;
|
|
|
|
}
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
te->dataLength = 0;
|
2001-04-01 07:42:51 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->ReadExtraTocPtr)
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ReadExtraTocPtr(AH, te);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("read TOC entry %d (ID %d) for %s %s",
|
2003-12-06 04:00:16 +01:00
|
|
|
i, te->dumpId, te->desc, te->tag);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
/* link completed entry into TOC circular list */
|
2000-07-21 13:40:08 +02:00
|
|
|
te->prev = AH->toc->prev;
|
|
|
|
AH->toc->prev->next = te;
|
|
|
|
AH->toc->prev = te;
|
|
|
|
te->next = AH->toc;
|
2006-05-28 23:13:54 +02:00
|
|
|
|
|
|
|
/* special processing immediately upon read for some items */
|
|
|
|
if (strcmp(te->desc, "ENCODING") == 0)
|
|
|
|
processEncodingEntry(AH, te);
|
|
|
|
else if (strcmp(te->desc, "STDSTRINGS") == 0)
|
|
|
|
processStdStringsEntry(AH, te);
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
else if (strcmp(te->desc, "SEARCHPATH") == 0)
|
|
|
|
processSearchPathEntry(AH, te);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-05-28 23:13:54 +02:00
|
|
|
static void
|
|
|
|
processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
/* te->defn should have the form SET client_encoding = 'foo'; */
|
2011-11-25 21:40:51 +01:00
|
|
|
char *defn = pg_strdup(te->defn);
|
2006-05-28 23:13:54 +02:00
|
|
|
char *ptr1;
|
|
|
|
char *ptr2 = NULL;
|
|
|
|
int encoding;
|
|
|
|
|
|
|
|
ptr1 = strchr(defn, '\'');
|
|
|
|
if (ptr1)
|
|
|
|
ptr2 = strchr(++ptr1, '\'');
|
|
|
|
if (ptr2)
|
|
|
|
{
|
|
|
|
*ptr2 = '\0';
|
|
|
|
encoding = pg_char_to_encoding(ptr1);
|
|
|
|
if (encoding < 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("unrecognized encoding \"%s\"",
|
2012-03-20 22:38:11 +01:00
|
|
|
ptr1);
|
2006-05-28 23:13:54 +02:00
|
|
|
AH->public.encoding = encoding;
|
|
|
|
}
|
|
|
|
else
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("invalid ENCODING item: %s",
|
2012-03-20 22:38:11 +01:00
|
|
|
te->defn);
|
2006-05-28 23:13:54 +02:00
|
|
|
|
|
|
|
free(defn);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
/* te->defn should have the form SET standard_conforming_strings = 'x'; */
|
|
|
|
char *ptr1;
|
|
|
|
|
|
|
|
ptr1 = strchr(te->defn, '\'');
|
|
|
|
if (ptr1 && strncmp(ptr1, "'on'", 4) == 0)
|
|
|
|
AH->public.std_strings = true;
|
|
|
|
else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
|
|
|
|
AH->public.std_strings = false;
|
|
|
|
else
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("invalid STDSTRINGS item: %s",
|
2012-03-20 22:38:11 +01:00
|
|
|
te->defn);
|
2006-05-28 23:13:54 +02:00
|
|
|
}
|
|
|
|
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
static void
|
|
|
|
processSearchPathEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* te->defn should contain a command to set search_path. We just copy it
|
|
|
|
* verbatim for use later.
|
|
|
|
*/
|
|
|
|
AH->public.searchpath = pg_strdup(te->defn);
|
|
|
|
}
|
|
|
|
|
2015-09-14 15:19:49 +02:00
|
|
|
static void
|
|
|
|
StrictNamesCheck(RestoreOptions *ropt)
|
|
|
|
{
|
|
|
|
const char *missing_name;
|
|
|
|
|
|
|
|
Assert(ropt->strict_names);
|
|
|
|
|
|
|
|
if (ropt->schemaNames.head != NULL)
|
|
|
|
{
|
|
|
|
missing_name = simple_string_list_not_touched(&ropt->schemaNames);
|
|
|
|
if (missing_name != NULL)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("schema \"%s\" not found", missing_name);
|
2015-09-14 15:19:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ropt->tableNames.head != NULL)
|
|
|
|
{
|
|
|
|
missing_name = simple_string_list_not_touched(&ropt->tableNames);
|
|
|
|
if (missing_name != NULL)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("table \"%s\" not found", missing_name);
|
2015-09-14 15:19:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ropt->indexNames.head != NULL)
|
|
|
|
{
|
|
|
|
missing_name = simple_string_list_not_touched(&ropt->indexNames);
|
|
|
|
if (missing_name != NULL)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("index \"%s\" not found", missing_name);
|
2015-09-14 15:19:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ropt->functionNames.head != NULL)
|
|
|
|
{
|
|
|
|
missing_name = simple_string_list_not_touched(&ropt->functionNames);
|
|
|
|
if (missing_name != NULL)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("function \"%s\" not found", missing_name);
|
2015-09-14 15:19:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ropt->triggerNames.head != NULL)
|
|
|
|
{
|
|
|
|
missing_name = simple_string_list_not_touched(&ropt->triggerNames);
|
|
|
|
if (missing_name != NULL)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("trigger \"%s\" not found", missing_name);
|
2015-09-14 15:19:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/*
|
|
|
|
* Determine whether we want to restore this TOC entry.
|
|
|
|
*
|
|
|
|
* Returns 0 if entry should be skipped, or some combination of the
|
|
|
|
* REQ_SCHEMA and REQ_DATA bits if we want to restore schema and/or data
|
|
|
|
* portions of this TOC entry, or REQ_SPECIAL if it's a special entry.
|
|
|
|
*/
|
2020-12-11 19:15:30 +01:00
|
|
|
static int
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
_tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2020-12-11 19:15:30 +01:00
|
|
|
int res = REQ_SCHEMA | REQ_DATA;
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
/* These items are treated specially */
|
2006-05-28 23:13:54 +02:00
|
|
|
if (strcmp(te->desc, "ENCODING") == 0 ||
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
strcmp(te->desc, "STDSTRINGS") == 0 ||
|
2021-05-27 19:24:24 +02:00
|
|
|
strcmp(te->desc, "SEARCHPATH") == 0)
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
return REQ_SPECIAL;
|
2004-02-24 04:35:19 +01:00
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/*
|
|
|
|
* DATABASE and DATABASE PROPERTIES also have a special rule: they are
|
|
|
|
* restored in createDB mode, and not restored otherwise, independently of
|
|
|
|
* all else.
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "DATABASE") == 0 ||
|
|
|
|
strcmp(te->desc, "DATABASE PROPERTIES") == 0)
|
|
|
|
{
|
|
|
|
if (ropt->createDB)
|
|
|
|
return REQ_SCHEMA;
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process exclusions that affect certain classes of TOC entries.
|
|
|
|
*/
|
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/* If it's an ACL, maybe ignore it */
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if (ropt->aclsSkip && _tocEntryIsACL(te))
|
2000-07-21 13:40:08 +02:00
|
|
|
return 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Support --no-comments in pg_dump, pg_dumpall, pg_restore.
We have switches already to suppress other subsidiary object properties,
such as ACLs, security labels, ownership, and tablespaces, so just on
the grounds of symmetry we should allow suppressing comments as well.
Also, commit 0d4e6ed30 added a positive reason to have this feature,
i.e. to allow obtaining the old behavior of selective pg_restore should
anyone desire that.
Recent commits have removed the cases where pg_dump emitted comments on
built-in objects that the restoring user might not have privileges to
comment on, so the original primary motivation for this feature is gone,
but it still seems at least somewhat useful in its own right.
Robins Tharakan, reviewed by Fabrízio Mello
Discussion: https://postgr.es/m/CAEP4nAx22Z4ch74oJGzr5RyyjcyUSbpiFLyeYXX8pehfou92ug@mail.gmail.com
2018-01-25 21:27:24 +01:00
|
|
|
/* If it's a comment, maybe ignore it */
|
|
|
|
if (ropt->no_comments && strcmp(te->desc, "COMMENT") == 0)
|
|
|
|
return 0;
|
|
|
|
|
2018-09-25 04:03:56 +02:00
|
|
|
/*
|
|
|
|
* If it's a publication or a table part of a publication, maybe ignore
|
|
|
|
* it.
|
|
|
|
*/
|
|
|
|
if (ropt->no_publications &&
|
|
|
|
(strcmp(te->desc, "PUBLICATION") == 0 ||
|
|
|
|
strcmp(te->desc, "PUBLICATION TABLE") == 0))
|
2017-05-12 15:15:40 +02:00
|
|
|
return 0;
|
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* If it's a security label, maybe ignore it */
|
2011-05-19 22:20:11 +02:00
|
|
|
if (ropt->no_security_labels && strcmp(te->desc, "SECURITY LABEL") == 0)
|
2010-09-28 02:55:27 +02:00
|
|
|
return 0;
|
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* If it's a subscription, maybe ignore it */
|
2017-05-09 16:58:06 +02:00
|
|
|
if (ropt->no_subscriptions && strcmp(te->desc, "SUBSCRIPTION") == 0)
|
|
|
|
return 0;
|
|
|
|
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
/* Ignore it if section is not to be dumped/restored */
|
|
|
|
switch (curSection)
|
2011-12-17 01:09:38 +01:00
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
case SECTION_PRE_DATA:
|
|
|
|
if (!(ropt->dumpSections & DUMP_PRE_DATA))
|
|
|
|
return 0;
|
|
|
|
break;
|
|
|
|
case SECTION_DATA:
|
|
|
|
if (!(ropt->dumpSections & DUMP_DATA))
|
|
|
|
return 0;
|
|
|
|
break;
|
|
|
|
case SECTION_POST_DATA:
|
|
|
|
if (!(ropt->dumpSections & DUMP_POST_DATA))
|
|
|
|
return 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* shouldn't get here, really, but ignore it */
|
2011-12-17 01:09:38 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* Ignore it if rejected by idWanted[] (cf. SortTocFromFile) */
|
|
|
|
if (ropt->idWanted && !ropt->idWanted[te->dumpId - 1])
|
2016-09-20 18:00:00 +02:00
|
|
|
return 0;
|
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/*
|
|
|
|
* Check options for selective dump/restore.
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "ACL") == 0 ||
|
|
|
|
strcmp(te->desc, "COMMENT") == 0 ||
|
|
|
|
strcmp(te->desc, "SECURITY LABEL") == 0)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* Database properties react to createDB, not selectivity options. */
|
|
|
|
if (strncmp(te->tag, "DATABASE ", 9) == 0)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
if (!ropt->createDB)
|
2000-07-21 13:40:08 +02:00
|
|
|
return 0;
|
|
|
|
}
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
else if (ropt->schemaNames.head != NULL ||
|
|
|
|
ropt->schemaExcludeNames.head != NULL ||
|
|
|
|
ropt->selTypes)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/*
|
|
|
|
* In a selective dump/restore, we want to restore these dependent
|
|
|
|
* TOC entry types only if their parent object is being restored.
|
|
|
|
* Without selectivity options, we let through everything in the
|
|
|
|
* archive. Note there may be such entries with no parent, eg
|
|
|
|
* non-default ACLs for built-in objects.
|
|
|
|
*
|
|
|
|
* This code depends on the parent having been marked already,
|
|
|
|
* which should be the case; if it isn't, perhaps due to
|
|
|
|
* SortTocFromFile rearrangement, skipping the dependent entry
|
|
|
|
* seems prudent anyway.
|
|
|
|
*
|
|
|
|
* Ideally we'd handle, eg, table CHECK constraints this way too.
|
|
|
|
* But it's hard to tell which of their dependencies is the one to
|
|
|
|
* consult.
|
|
|
|
*/
|
|
|
|
if (te->nDeps != 1 ||
|
|
|
|
TocIDRequired(AH, te->dependencies[0]) == 0)
|
2000-07-21 13:40:08 +02:00
|
|
|
return 0;
|
|
|
|
}
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Apply selective-restore rules for standalone TOC entries. */
|
|
|
|
if (ropt->schemaNames.head != NULL)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* If no namespace is specified, it means all. */
|
|
|
|
if (!te->namespace)
|
2000-07-21 13:40:08 +02:00
|
|
|
return 0;
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
if (!simple_string_list_member(&ropt->schemaNames, te->namespace))
|
2000-07-21 13:40:08 +02:00
|
|
|
return 0;
|
|
|
|
}
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
|
|
|
|
if (ropt->schemaExcludeNames.head != NULL &&
|
|
|
|
te->namespace &&
|
|
|
|
simple_string_list_member(&ropt->schemaExcludeNames, te->namespace))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (ropt->selTypes)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
if (strcmp(te->desc, "TABLE") == 0 ||
|
|
|
|
strcmp(te->desc, "TABLE DATA") == 0 ||
|
|
|
|
strcmp(te->desc, "VIEW") == 0 ||
|
|
|
|
strcmp(te->desc, "FOREIGN TABLE") == 0 ||
|
|
|
|
strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
|
|
|
|
strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0 ||
|
|
|
|
strcmp(te->desc, "SEQUENCE") == 0 ||
|
|
|
|
strcmp(te->desc, "SEQUENCE SET") == 0)
|
|
|
|
{
|
|
|
|
if (!ropt->selTable)
|
|
|
|
return 0;
|
|
|
|
if (ropt->tableNames.head != NULL &&
|
|
|
|
!simple_string_list_member(&ropt->tableNames, te->tag))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
else if (strcmp(te->desc, "INDEX") == 0)
|
|
|
|
{
|
|
|
|
if (!ropt->selIndex)
|
|
|
|
return 0;
|
|
|
|
if (ropt->indexNames.head != NULL &&
|
|
|
|
!simple_string_list_member(&ropt->indexNames, te->tag))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
else if (strcmp(te->desc, "FUNCTION") == 0 ||
|
|
|
|
strcmp(te->desc, "AGGREGATE") == 0 ||
|
|
|
|
strcmp(te->desc, "PROCEDURE") == 0)
|
|
|
|
{
|
|
|
|
if (!ropt->selFunction)
|
|
|
|
return 0;
|
|
|
|
if (ropt->functionNames.head != NULL &&
|
|
|
|
!simple_string_list_member(&ropt->functionNames, te->tag))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
else if (strcmp(te->desc, "TRIGGER") == 0)
|
|
|
|
{
|
|
|
|
if (!ropt->selTrigger)
|
|
|
|
return 0;
|
|
|
|
if (ropt->triggerNames.head != NULL &&
|
|
|
|
!simple_string_list_member(&ropt->triggerNames, te->tag))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
else
|
2000-07-21 13:40:08 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2001-11-04 05:05:36 +01:00
|
|
|
/*
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
* Determine whether the TOC entry contains schema and/or data components,
|
|
|
|
* and mask off inapplicable REQ bits. If it had a dataDumper, assume
|
|
|
|
* it's both schema and data. Otherwise it's probably schema-only, but
|
|
|
|
* there are exceptions.
|
2001-11-04 05:05:36 +01:00
|
|
|
*/
|
|
|
|
if (!te->hadDumper)
|
|
|
|
{
|
|
|
|
/*
|
2010-02-18 02:29:10 +01:00
|
|
|
* Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
|
|
|
|
* it is considered a data entry. We don't need to check for the
|
|
|
|
* BLOBS entry or old-style BLOB COMMENTS, because they will have
|
Move handling of database properties from pg_dumpall into pg_dump.
This patch rearranges the division of labor between pg_dump and pg_dumpall
so that pg_dump itself handles all properties attached to a single
database. Notably, a database's ACL (GRANT/REVOKE status) and local GUC
settings established by ALTER DATABASE SET and ALTER ROLE IN DATABASE SET
can be dumped and restored by pg_dump. This is a long-requested
improvement.
"pg_dumpall -g" will now produce only role- and tablespace-related output,
nothing about individual databases. The total output of a regular
pg_dumpall run remains the same.
pg_dump (or pg_restore) will restore database-level properties only when
creating the target database with --create. This applies not only to
ACLs and GUCs but to the other database properties it already handled,
that is database comments and security labels. This is more consistent
and useful, but does represent an incompatibility in the behavior seen
without --create.
(This change makes the proposed patch to have pg_dump use "COMMENT ON
DATABASE CURRENT_DATABASE" unnecessary, since there is no case where
the command is issued that we won't know the true name of the database.
We might still want that patch as a feature in its own right, but pg_dump
no longer needs it.)
pg_dumpall with --clean will now drop and recreate the "postgres" and
"template1" databases in the target cluster, allowing their locale and
encoding settings to be changed if necessary, and providing a cleaner
way to set nondefault tablespaces for them than we had before. This
means that such a script must now always be started in the "postgres"
database; the order of drops and reconnects will not work otherwise.
Without --clean, the script will not adjust any database-level properties
of those two databases (including their comments, ACLs, and security
labels, which it formerly would try to set).
Another minor incompatibility is that the CREATE DATABASE commands in a
pg_dumpall script will now always specify locale and encoding settings.
Formerly those would be omitted if they matched the cluster's default.
While that behavior had some usefulness in some migration scenarios,
it also posed a significant hazard of unwanted locale/encoding changes.
To migrate to another locale/encoding, it's now necessary to use pg_dump
without --create to restore into a database with the desired settings.
Commit 4bd371f6f's hack to emit "SET default_transaction_read_only = off"
is gone: we now dodge that problem by the expedient of not issuing ALTER
DATABASE SET commands until after reconnecting to the target database.
Therefore, such settings won't apply during the restore session.
In passing, improve some shaky grammar in the docs, and add a note pointing
out that pg_dumpall's output can't be expected to load without any errors.
(Someday we might want to fix that, but this is not that patch.)
Haribabu Kommi, reviewed at various times by Andreas Karlsson,
Vaishnavi Prabakaran, and Robert Haas; further hacking by me.
Discussion: https://postgr.es/m/CAJrrPGcUurV0eWTeXODwsOYFN=Ekq36t1s0YnFYUNzsmRfdAyA@mail.gmail.com
2018-01-22 20:09:09 +01:00
|
|
|
* hadDumper = true ... but we do need to check new-style BLOB ACLs,
|
|
|
|
* comments, etc.
|
2001-11-04 05:05:36 +01:00
|
|
|
*/
|
2010-02-18 02:29:10 +01:00
|
|
|
if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
|
|
|
|
strcmp(te->desc, "BLOB") == 0 ||
|
|
|
|
(strcmp(te->desc, "ACL") == 0 &&
|
|
|
|
strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
|
|
|
|
(strcmp(te->desc, "COMMENT") == 0 &&
|
2010-09-28 02:55:27 +02:00
|
|
|
strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
|
|
|
|
(strcmp(te->desc, "SECURITY LABEL") == 0 &&
|
2010-02-18 02:29:10 +01:00
|
|
|
strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
|
2001-11-04 05:05:36 +01:00
|
|
|
res = res & REQ_DATA;
|
|
|
|
else
|
|
|
|
res = res & ~REQ_DATA;
|
|
|
|
}
|
2000-10-13 02:43:31 +02:00
|
|
|
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
/* If there's no definition command, there's no schema component */
|
|
|
|
if (!te->defn || !te->defn[0])
|
|
|
|
res = res & ~REQ_SCHEMA;
|
|
|
|
|
2002-01-18 20:17:05 +01:00
|
|
|
/*
|
2005-08-12 03:36:05 +02:00
|
|
|
* Special case: <Init> type with <Max OID> tag; this is obsolete and we
|
|
|
|
* always ignore it.
|
2002-01-18 20:17:05 +01:00
|
|
|
*/
|
2002-07-04 17:35:07 +02:00
|
|
|
if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
|
2005-08-12 03:36:05 +02:00
|
|
|
return 0;
|
2002-01-18 18:13:51 +01:00
|
|
|
|
2000-07-04 16:25:28 +02:00
|
|
|
/* Mask it if we only want schema */
|
|
|
|
if (ropt->schemaOnly)
|
2016-08-23 18:00:00 +02:00
|
|
|
{
|
pg_upgrade: Fix large object COMMENTS, SECURITY LABELS
When performing a pg_upgrade, we copy the files behind pg_largeobject
and pg_largeobject_metadata, allowing us to avoid having to dump out and
reload the actual data for large objects and their ACLs.
Unfortunately, that isn't all of the information which can be associated
with large objects. Currently, we also support COMMENTs and SECURITY
LABELs with large objects and these were being silently dropped during a
pg_upgrade as pg_dump would skip everything having to do with a large
object and pg_upgrade only copied the tables mentioned to the new
cluster.
As the file copies happen after the catalog dump and reload, we can't
simply include the COMMENTs and SECURITY LABELs in pg_dump's binary-mode
output but we also have to include the actual large object definition as
well. With the definition, comments, and security labels in the pg_dump
output and the file copies performed by pg_upgrade, all of the data and
metadata associated with large objects is able to be successfully pulled
forward across a pg_upgrade.
In 9.6 and master, we can simply adjust the dump bitmask to indicate
which components we don't want. In 9.5 and earlier, we have to put
explicit checks in dumpBlob() and dumpBlobs() to not include the ACL
or the data when in binary-upgrade mode.
Adjustments made to the privileges regression test to allow another test
(large_object.sql) to be added which explicitly leaves a large object
with a comment in place to provide coverage of that case with
pg_upgrade.
Back-patch to all supported branches.
Discussion: https://postgr.es/m/20170221162655.GE9812@tamriel.snowman.net
2017-03-06 23:03:57 +01:00
|
|
|
/*
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
* The sequence_data option overrides schemaOnly for SEQUENCE SET.
|
Make pg_dump's ACL, sec label, and comment entries reliably identifiable.
_tocEntryRequired() expects that it can identify ACL, SECURITY LABEL,
and COMMENT TOC entries that are for large objects by seeing whether
the tag for them starts with "LARGE OBJECT ". While that works fine
for actual large objects, which are indeed tagged that way, it's
subject to false positives unless every such entry's tag starts with an
appropriate type ID. And in fact it does not work for ACLs, because
up to now we customarily tagged those entries with just the bare name
of the object. This means that an ACL for an object named
"LARGE OBJECT something" would be misclassified as data not schema,
with undesirable results in a schema-only or data-only dump ---
although pg_upgrade seems unaffected, due to the special case for
binary-upgrade mode further down in _tocEntryRequired().
We can fix this by changing all the dumpACL calls to use the label
strings already in use for comments and security labels, which do
follow the convention of starting with an object type indicator.
Well, mostly they follow it. dumpDatabase() got it wrong, using
just the bare database name for those purposes, so that a database
named "LARGE OBJECT something" would similarly be subject to having
its comment or security label dropped or included when not wanted.
Bring that into line too. (Note that up to now, database ACLs have
not been processed by pg_dump, so that this issue doesn't affect them.)
_tocEntryRequired() itself is not free of fault: it was overly liberal
about matching object tags to "LARGE OBJECT " in binary-upgrade mode.
This looks like it is probably harmless because there would be no data
component to strip anyway in that mode, but at best it's trouble
waiting to happen, so tighten that up too.
The possible misclassification of SECURITY LABEL entries for databases is
in principle a security problem, but the opportunities for actual exploits
seem too narrow to be interesting. The other cases seem like just bugs,
since an object owner can change its ACL or comment for himself, he needn't
try to trick someone else into doing it by choosing a strange name.
This has been broken since per-large-object TOC entries were introduced
in 9.0, so back-patch to all supported branches.
Discussion: https://postgr.es/m/21714.1516553459@sss.pgh.pa.us
2018-01-22 18:06:18 +01:00
|
|
|
*
|
Clean up some aspects of pg_dump/pg_restore item-selection logic.
Ensure that CREATE DATABASE and related commands are issued when, and
only when, --create is specified. Previously there were scenarios
where using selective-dump switches would prevent --create from having
any effect. For example, it would fail to do anything in pg_restore
if the archive file had been made by a selective dump, because there
would be no TOC entry for the database.
Since we don't issue \connect either if we don't issue CREATE DATABASE,
this could result in unexpectedly restoring objects into the wrong
database.
Also fix pg_restore's selective restore logic so that when an object is
selected to be restored, we also restore its ACL, comment, and security
label if any. Previously there was no way to get the latter properties
except through tedious mucking about with a -L file. If, for some
reason, you don't want these properties, you can match the old behavior
by adding --no-acl etc.
While at it, try to make _tocEntryRequired() a little better organized
and better documented.
Discussion: https://postgr.es/m/32668.1516848577@sss.pgh.pa.us
2018-01-25 20:26:07 +01:00
|
|
|
* In binary-upgrade mode, even with schemaOnly set, we do not mask
|
|
|
|
* out large objects. (Only large object definitions, comments and
|
|
|
|
* other metadata should be generated in binary-upgrade mode, not the
|
|
|
|
* actual data, but that need not concern us here.)
|
pg_upgrade: Fix large object COMMENTS, SECURITY LABELS
When performing a pg_upgrade, we copy the files behind pg_largeobject
and pg_largeobject_metadata, allowing us to avoid having to dump out and
reload the actual data for large objects and their ACLs.
Unfortunately, that isn't all of the information which can be associated
with large objects. Currently, we also support COMMENTs and SECURITY
LABELs with large objects and these were being silently dropped during a
pg_upgrade as pg_dump would skip everything having to do with a large
object and pg_upgrade only copied the tables mentioned to the new
cluster.
As the file copies happen after the catalog dump and reload, we can't
simply include the COMMENTs and SECURITY LABELs in pg_dump's binary-mode
output but we also have to include the actual large object definition as
well. With the definition, comments, and security labels in the pg_dump
output and the file copies performed by pg_upgrade, all of the data and
metadata associated with large objects is able to be successfully pulled
forward across a pg_upgrade.
In 9.6 and master, we can simply adjust the dump bitmask to indicate
which components we don't want. In 9.5 and earlier, we have to put
explicit checks in dumpBlob() and dumpBlobs() to not include the ACL
or the data when in binary-upgrade mode.
Adjustments made to the privileges regression test to allow another test
(large_object.sql) to be added which explicitly leaves a large object
with a comment in place to provide coverage of that case with
pg_upgrade.
Back-patch to all supported branches.
Discussion: https://postgr.es/m/20170221162655.GE9812@tamriel.snowman.net
2017-03-06 23:03:57 +01:00
|
|
|
*/
|
|
|
|
if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
|
Make pg_dump's ACL, sec label, and comment entries reliably identifiable.
_tocEntryRequired() expects that it can identify ACL, SECURITY LABEL,
and COMMENT TOC entries that are for large objects by seeing whether
the tag for them starts with "LARGE OBJECT ". While that works fine
for actual large objects, which are indeed tagged that way, it's
subject to false positives unless every such entry's tag starts with an
appropriate type ID. And in fact it does not work for ACLs, because
up to now we customarily tagged those entries with just the bare name
of the object. This means that an ACL for an object named
"LARGE OBJECT something" would be misclassified as data not schema,
with undesirable results in a schema-only or data-only dump ---
although pg_upgrade seems unaffected, due to the special case for
binary-upgrade mode further down in _tocEntryRequired().
We can fix this by changing all the dumpACL calls to use the label
strings already in use for comments and security labels, which do
follow the convention of starting with an object type indicator.
Well, mostly they follow it. dumpDatabase() got it wrong, using
just the bare database name for those purposes, so that a database
named "LARGE OBJECT something" would similarly be subject to having
its comment or security label dropped or included when not wanted.
Bring that into line too. (Note that up to now, database ACLs have
not been processed by pg_dump, so that this issue doesn't affect them.)
_tocEntryRequired() itself is not free of fault: it was overly liberal
about matching object tags to "LARGE OBJECT " in binary-upgrade mode.
This looks like it is probably harmless because there would be no data
component to strip anyway in that mode, but at best it's trouble
waiting to happen, so tighten that up too.
The possible misclassification of SECURITY LABEL entries for databases is
in principle a security problem, but the opportunities for actual exploits
seem too narrow to be interesting. The other cases seem like just bugs,
since an object owner can change its ACL or comment for himself, he needn't
try to trick someone else into doing it by choosing a strange name.
This has been broken since per-large-object TOC entries were introduced
in 9.0, so back-patch to all supported branches.
Discussion: https://postgr.es/m/21714.1516553459@sss.pgh.pa.us
2018-01-22 18:06:18 +01:00
|
|
|
!(ropt->binary_upgrade &&
|
|
|
|
(strcmp(te->desc, "BLOB") == 0 ||
|
|
|
|
(strcmp(te->desc, "ACL") == 0 &&
|
|
|
|
strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
|
|
|
|
(strcmp(te->desc, "COMMENT") == 0 &&
|
|
|
|
strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
|
|
|
|
(strcmp(te->desc, "SECURITY LABEL") == 0 &&
|
|
|
|
strncmp(te->tag, "LARGE OBJECT ", 13) == 0))))
|
2016-08-23 18:00:00 +02:00
|
|
|
res = res & REQ_SCHEMA;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
/* Mask it if we only want data */
|
2001-05-12 03:03:59 +02:00
|
|
|
if (ropt->dataOnly)
|
2001-11-04 05:05:36 +01:00
|
|
|
res = res & REQ_DATA;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* Identify which pass we should restore this TOC entry in.
|
|
|
|
*
|
|
|
|
* See notes with the RestorePass typedef in pg_backup_archiver.h.
|
|
|
|
*/
|
|
|
|
static RestorePass
|
|
|
|
_tocEntryRestorePass(TocEntry *te)
|
|
|
|
{
|
|
|
|
/* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
|
|
|
|
if (strcmp(te->desc, "ACL") == 0 ||
|
|
|
|
strcmp(te->desc, "ACL LANGUAGE") == 0 ||
|
|
|
|
strcmp(te->desc, "DEFAULT ACL") == 0)
|
|
|
|
return RESTORE_PASS_ACL;
|
Fix pg_dump/pg_restore to restore event triggers later.
Previously, event triggers were restored just after regular triggers
(and FK constraints, which are basically triggers). This is risky
since an event trigger, once installed, could interfere with subsequent
restore commands. Worse, because event triggers don't have any
particular dependencies on any post-data objects, a parallel restore
would consider them eligible to be restored the moment the post-data
phase starts, allowing them to also interfere with restoration of a
whole bunch of objects that would have been restored before them in
a serial restore. There's no way to completely remove the risk of a
misguided event trigger breaking the restore, since if nothing else
it could break other event triggers. But we can certainly push them
to later in the process to minimize the hazard.
To fix, tweak the RestorePass mechanism introduced by commit 3eb9a5e7c
so that event triggers are handled as part of the post-ACL processing
pass (renaming the "REFRESH" pass to "POST_ACL" to reflect its more
general use). This will cause them to restore after everything except
matview refreshes, which seems OK since matview refreshes really ought
to run in the post-restore state of the database. In a parallel
restore, event triggers and matview refreshes might be intermixed,
but that seems all right as well.
Also update the code and comments in pg_dump_sort.c so that its idea
of how things are sorted agrees with what actually happens due to
the RestorePass mechanism. This is mostly cosmetic: it'll affect the
order of objects in a dump's TOC, but not the actual restore order.
But not changing that would be quite confusing to somebody reading
the code.
Back-patch to all supported branches.
Fabrízio de Royes Mello, tweaked a bit by me
Discussion: https://postgr.es/m/CAFcNs+ow1hmFox8P--3GSdtwz-S3Binb6ZmoP6Vk+Xg=K6eZNA@mail.gmail.com
2020-03-09 19:58:11 +01:00
|
|
|
if (strcmp(te->desc, "EVENT TRIGGER") == 0 ||
|
|
|
|
strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0)
|
|
|
|
return RESTORE_PASS_POST_ACL;
|
2020-04-08 17:23:39 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Comments need to be emitted in the same pass as their parent objects.
|
|
|
|
* ACLs haven't got comments, and neither do matview data objects, but
|
|
|
|
* event triggers do. (Fortunately, event triggers haven't got ACLs, or
|
|
|
|
* we'd need yet another weird special case.)
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "COMMENT") == 0 &&
|
|
|
|
strncmp(te->tag, "EVENT TRIGGER ", 14) == 0)
|
|
|
|
return RESTORE_PASS_POST_ACL;
|
|
|
|
|
|
|
|
/* All else can be handled in the main pass. */
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
return RESTORE_PASS_MAIN;
|
|
|
|
}
|
|
|
|
|
2010-02-18 02:29:10 +01:00
|
|
|
/*
|
|
|
|
* Identify TOC entries that are ACLs.
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
*
|
|
|
|
* Note: it seems worth duplicating some code here to avoid a hard-wired
|
|
|
|
* assumption that these are exactly the same entries that we restore during
|
|
|
|
* the RESTORE_PASS_ACL phase.
|
2010-02-18 02:29:10 +01:00
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
_tocEntryIsACL(TocEntry *te)
|
|
|
|
{
|
|
|
|
/* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
|
|
|
|
if (strcmp(te->desc, "ACL") == 0 ||
|
|
|
|
strcmp(te->desc, "ACL LANGUAGE") == 0 ||
|
|
|
|
strcmp(te->desc, "DEFAULT ACL") == 0)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/*
 * Issue SET commands for parameters that we want to have set the same way
 * at all times during execution of a restore script.
 *
 * Output goes through ahprintf, so it is either executed directly (when
 * restoring into a live database) or written to the script file.  The
 * emission order here is deliberate and part of the script's contract.
 */
static void
_doSetFixedOutputState(ArchiveHandle *AH)
{
	RestoreOptions *ropt = AH->public.ropt;

	/*
	 * Disable timeouts to allow for slow commands, idle parallel workers, etc
	 */
	ahprintf(AH, "SET statement_timeout = 0;\n");
	ahprintf(AH, "SET lock_timeout = 0;\n");
	ahprintf(AH, "SET idle_in_transaction_session_timeout = 0;\n");

	/* Select the correct character set encoding */
	ahprintf(AH, "SET client_encoding = '%s';\n",
			 pg_encoding_to_char(AH->public.encoding));

	/* Select the correct string literal syntax */
	ahprintf(AH, "SET standard_conforming_strings = %s;\n",
			 AH->public.std_strings ? "on" : "off");

	/* Select the role to be used during restore */
	if (ropt && ropt->use_role)
		ahprintf(AH, "SET ROLE %s;\n", fmtId(ropt->use_role));

	/* Select the dump-time search_path (already a complete SET statement) */
	if (AH->public.searchpath)
		ahprintf(AH, "%s", AH->public.searchpath);

	/* Make sure function checking is disabled */
	ahprintf(AH, "SET check_function_bodies = false;\n");

	/* Ensure that all valid XML data will be accepted */
	ahprintf(AH, "SET xmloption = content;\n");

	/* Avoid annoying notices etc */
	ahprintf(AH, "SET client_min_messages = warning;\n");
	if (!AH->public.std_strings)
		ahprintf(AH, "SET escape_string_warning = off;\n");

	/* Adjust row-security state (default to off when no option given) */
	if (ropt && ropt->enable_row_security)
		ahprintf(AH, "SET row_security = on;\n");
	else
		ahprintf(AH, "SET row_security = off;\n");

	ahprintf(AH, "\n");
}
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
/*
|
|
|
|
* Issue a SET SESSION AUTHORIZATION command. Caller is responsible
|
2003-09-24 00:48:53 +02:00
|
|
|
* for updating state if appropriate. If user is NULL or an empty string,
|
|
|
|
* the specification DEFAULT will be used.
|
2002-05-11 00:36:27 +02:00
|
|
|
*/
|
|
|
|
static void
|
2002-08-18 11:36:26 +02:00
|
|
|
_doSetSessionAuth(ArchiveHandle *AH, const char *user)
|
2002-05-11 00:36:27 +02:00
|
|
|
{
|
2002-08-18 11:36:26 +02:00
|
|
|
PQExpBuffer cmd = createPQExpBuffer();
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION ");
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2003-09-24 00:48:53 +02:00
|
|
|
/*
|
|
|
|
* SQL requires a string literal here. Might as well be correct.
|
|
|
|
*/
|
|
|
|
if (user && *user)
|
2006-05-28 23:13:54 +02:00
|
|
|
appendStringLiteralAHX(cmd, user, AH);
|
2002-08-18 11:36:26 +02:00
|
|
|
else
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(cmd, "DEFAULT");
|
|
|
|
appendPQExpBufferChar(cmd, ';');
|
2002-08-18 11:36:26 +02:00
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
2002-08-18 11:36:26 +02:00
|
|
|
res = PQexec(AH->connection, cmd->data);
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
2012-03-20 22:38:11 +01:00
|
|
|
/* NOT warn_or_exit_horribly... use -O instead to skip this. */
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("could not set session user to \"%s\": %s",
|
2012-03-20 22:38:11 +01:00
|
|
|
user, PQerrorMessage(AH->connection));
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
2002-08-18 11:36:26 +02:00
|
|
|
ahprintf(AH, "%s\n\n", cmd->data);
|
|
|
|
|
|
|
|
destroyPQExpBuffer(cmd);
|
2002-05-11 00:36:27 +02:00
|
|
|
}
|
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
|
|
|
|
/*
|
2004-09-10 22:05:18 +02:00
|
|
|
* Issue the commands to connect to the specified database.
|
2001-08-22 22:23:24 +02:00
|
|
|
*
|
|
|
|
* If we're currently restoring right into a database, this will
|
2002-02-11 01:18:20 +01:00
|
|
|
* actually establish a connection. Otherwise it puts a \connect into
|
2001-08-22 22:23:24 +02:00
|
|
|
* the script output.
|
|
|
|
*/
|
2000-08-01 17:51:45 +02:00
|
|
|
static void
|
2004-09-10 22:05:18 +02:00
|
|
|
_reconnectToDB(ArchiveHandle *AH, const char *dbname)
|
2000-07-24 08:24:26 +02:00
|
|
|
{
|
2003-09-24 00:48:53 +02:00
|
|
|
if (RestoringToDB(AH))
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
ReconnectToServer(AH, dbname);
|
2001-08-22 22:23:24 +02:00
|
|
|
else
|
2002-02-11 01:18:20 +01:00
|
|
|
{
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
PQExpBufferData connectbuf;
|
2002-02-11 01:18:20 +01:00
|
|
|
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
initPQExpBuffer(&connectbuf);
|
|
|
|
appendPsqlMetaConnect(&connectbuf, dbname);
|
|
|
|
ahprintf(AH, "%s\n", connectbuf.data);
|
|
|
|
termPQExpBuffer(&connectbuf);
|
2002-02-11 01:18:20 +01:00
|
|
|
}
|
2001-08-22 22:23:24 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* NOTE: currUser keeps track of what the imaginary session user in our
|
2004-09-10 22:05:18 +02:00
|
|
|
* script is. It's now effectively reset to the original userID.
|
2001-08-22 22:23:24 +02:00
|
|
|
*/
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currUser = NULL;
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/* don't assume we still know the output schema, tablespace, etc either */
|
2003-09-24 00:48:53 +02:00
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->currSchema = NULL;
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
|
|
|
AH->currTablespace = NULL;
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2004-02-24 04:35:19 +01:00
|
|
|
/* re-establish fixed state */
|
|
|
|
_doSetFixedOutputState(AH);
|
2000-08-01 17:51:45 +02:00
|
|
|
}
|
|
|
|
|
2003-09-24 00:48:53 +02:00
|
|
|
/*
|
|
|
|
* Become the specified user, and update state to avoid redundant commands
|
|
|
|
*
|
|
|
|
* NULL or empty argument is taken to mean restoring the session default
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_becomeUser(ArchiveHandle *AH, const char *user)
|
|
|
|
{
|
|
|
|
if (!user)
|
|
|
|
user = ""; /* avoid null pointers */
|
|
|
|
|
|
|
|
if (AH->currUser && strcmp(AH->currUser, user) == 0)
|
|
|
|
return; /* no need to do anything */
|
|
|
|
|
|
|
|
_doSetSessionAuth(AH, user);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NOTE: currUser keeps track of what the imaginary session user in our
|
|
|
|
* script is
|
|
|
|
*/
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->currUser = pg_strdup(user);
|
2003-09-24 00:48:53 +02:00
|
|
|
}
|
2001-08-22 22:23:24 +02:00
|
|
|
|
|
|
|
/*
|
2010-04-24 01:21:44 +02:00
|
|
|
* Become the owner of the given TOC entry object. If
|
2001-08-22 22:23:24 +02:00
|
|
|
* changes in ownership are not allowed, this doesn't do anything.
|
|
|
|
*/
|
2000-08-01 17:51:45 +02:00
|
|
|
static void
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeOwner(ArchiveHandle *AH, TocEntry *te)
|
2000-08-01 17:51:45 +02:00
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
|
|
|
if (ropt && (ropt->noOwner || !ropt->use_setsessauth))
|
2000-08-01 17:51:45 +02:00
|
|
|
return;
|
|
|
|
|
2003-09-24 00:48:53 +02:00
|
|
|
_becomeUser(AH, te->owner);
|
2000-07-24 08:24:26 +02:00
|
|
|
}
|
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
/*
|
|
|
|
* Issue the commands to select the specified schema as the current schema
|
|
|
|
* in the target database.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
|
|
|
|
{
|
2002-05-29 00:26:57 +02:00
|
|
|
PQExpBuffer qry;
|
|
|
|
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
/*
|
|
|
|
* If there was a SEARCHPATH TOC entry, we're supposed to just stay with
|
|
|
|
* that search_path rather than switching to entry-specific paths.
|
|
|
|
* Otherwise, it's an old archive that will not restore correctly unless
|
|
|
|
* we set the search_path as it's expecting.
|
|
|
|
*/
|
|
|
|
if (AH->public.searchpath)
|
|
|
|
return;
|
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (!schemaName || *schemaName == '\0' ||
|
2006-04-19 18:02:17 +02:00
|
|
|
(AH->currSchema && strcmp(AH->currSchema, schemaName) == 0))
|
2002-05-11 00:36:27 +02:00
|
|
|
return; /* no need to do anything */
|
|
|
|
|
2002-05-29 00:26:57 +02:00
|
|
|
qry = createPQExpBuffer();
|
|
|
|
|
|
|
|
appendPQExpBuffer(qry, "SET search_path = %s",
|
2002-08-18 11:36:26 +02:00
|
|
|
fmtId(schemaName));
|
2002-05-29 00:26:57 +02:00
|
|
|
if (strcmp(schemaName, "pg_catalog") != 0)
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(qry, ", pg_catalog");
|
2002-05-29 00:26:57 +02:00
|
|
|
|
2002-05-11 00:36:27 +02:00
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, qry->data);
|
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
warn_or_exit_horribly(AH,
|
2012-03-20 22:38:11 +01:00
|
|
|
"could not set search_path to \"%s\": %s",
|
|
|
|
schemaName, PQerrorMessage(AH->connection));
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
2002-05-29 00:26:57 +02:00
|
|
|
ahprintf(AH, "%s;\n\n", qry->data);
|
2002-05-11 00:36:27 +02:00
|
|
|
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->currSchema = pg_strdup(schemaName);
|
2002-05-29 00:26:57 +02:00
|
|
|
|
|
|
|
destroyPQExpBuffer(qry);
|
2002-05-11 00:36:27 +02:00
|
|
|
}
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
/*
|
|
|
|
* Issue the commands to select the specified tablespace as the current one
|
|
|
|
* in the target database.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_selectTablespace(ArchiveHandle *AH, const char *tablespace)
|
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
2004-11-06 20:36:02 +01:00
|
|
|
PQExpBuffer qry;
|
|
|
|
const char *want,
|
|
|
|
*have;
|
|
|
|
|
2008-03-20 18:36:58 +01:00
|
|
|
/* do nothing in --no-tablespaces mode */
|
2016-01-13 23:48:33 +01:00
|
|
|
if (ropt->noTablespace)
|
2008-03-20 18:36:58 +01:00
|
|
|
return;
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
have = AH->currTablespace;
|
|
|
|
want = tablespace;
|
|
|
|
|
|
|
|
/* no need to do anything for non-tablespace object */
|
|
|
|
if (!want)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (have && strcmp(want, have) == 0)
|
|
|
|
return; /* no need to do anything */
|
|
|
|
|
|
|
|
qry = createPQExpBuffer();
|
|
|
|
|
|
|
|
if (strcmp(want, "") == 0)
|
|
|
|
{
|
|
|
|
/* We want the tablespace to be the database's default */
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(qry, "SET default_tablespace = ''");
|
2004-11-06 20:36:02 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* We want an explicit tablespace */
|
|
|
|
appendPQExpBuffer(qry, "SET default_tablespace = %s", fmtId(want));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, qry->data);
|
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
warn_or_exit_horribly(AH,
|
2012-03-20 22:38:11 +01:00
|
|
|
"could not set default_tablespace to %s: %s",
|
|
|
|
fmtId(want), PQerrorMessage(AH->connection));
|
2004-11-06 20:36:02 +01:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
ahprintf(AH, "%s;\n\n", qry->data);
|
|
|
|
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->currTablespace = pg_strdup(want);
|
2004-11-06 20:36:02 +01:00
|
|
|
|
|
|
|
destroyPQExpBuffer(qry);
|
|
|
|
}
|
2002-05-11 00:36:27 +02:00
|
|
|
|
2019-03-06 18:54:38 +01:00
|
|
|
/*
|
|
|
|
* Set the proper default_table_access_method value for the table.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
|
|
|
|
{
|
|
|
|
PQExpBuffer cmd;
|
|
|
|
const char *want,
|
|
|
|
*have;
|
|
|
|
|
|
|
|
have = AH->currTableAm;
|
|
|
|
want = tableam;
|
|
|
|
|
|
|
|
if (!want)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (have && strcmp(want, have) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
cmd = createPQExpBuffer();
|
|
|
|
appendPQExpBuffer(cmd, "SET default_table_access_method = %s;", fmtId(want));
|
|
|
|
|
|
|
|
if (RestoringToDB(AH))
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, cmd->data);
|
|
|
|
|
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
warn_or_exit_horribly(AH,
|
2019-03-06 18:54:38 +01:00
|
|
|
"could not set default_table_access_method: %s",
|
|
|
|
PQerrorMessage(AH->connection));
|
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
ahprintf(AH, "%s\n\n", cmd->data);
|
|
|
|
|
|
|
|
destroyPQExpBuffer(cmd);
|
|
|
|
|
2021-10-24 18:38:26 +02:00
|
|
|
if (AH->currTableAm)
|
|
|
|
free(AH->currTableAm);
|
2019-03-06 18:54:38 +01:00
|
|
|
AH->currTableAm = pg_strdup(want);
|
|
|
|
}
|
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
/*
|
|
|
|
* Extract an object description for a TOC entry, and append it to buf.
|
|
|
|
*
|
2013-08-13 17:45:56 +02:00
|
|
|
* This is used for ALTER ... OWNER TO.
|
2004-07-13 05:00:17 +02:00
|
|
|
*/
|
2005-01-11 06:14:13 +01:00
|
|
|
static void
|
2020-08-25 07:24:15 +02:00
|
|
|
_getObjectDescription(PQExpBuffer buf, TocEntry *te)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
2005-01-11 06:14:13 +01:00
|
|
|
const char *type = te->desc;
|
|
|
|
|
|
|
|
/* Use ALTER TABLE for views and sequences */
|
2013-03-24 16:27:20 +01:00
|
|
|
if (strcmp(type, "VIEW") == 0 || strcmp(type, "SEQUENCE") == 0 ||
|
2013-03-04 01:23:31 +01:00
|
|
|
strcmp(type, "MATERIALIZED VIEW") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
type = "TABLE";
|
|
|
|
|
2013-08-13 17:45:56 +02:00
|
|
|
/* objects that don't require special decoration */
|
2011-02-12 14:54:13 +01:00
|
|
|
if (strcmp(type, "COLLATION") == 0 ||
|
|
|
|
strcmp(type, "CONVERSION") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(type, "DOMAIN") == 0 ||
|
|
|
|
strcmp(type, "TABLE") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(type, "TYPE") == 0 ||
|
2011-01-02 05:48:11 +01:00
|
|
|
strcmp(type, "FOREIGN TABLE") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
|
2013-08-13 17:45:56 +02:00
|
|
|
strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
|
Fix assorted errors in pg_dump's handling of extended statistics objects.
pg_dump supposed that a stats object necessarily shares the same schema
as its underlying table, and that it doesn't have a separate owner.
These things may have been true during early development of the feature,
but they are not true as of v10 release.
Failure to track the object's schema separately turns out to have only
limited consequences, because pg_get_statisticsobjdef() always schema-
qualifies the target object name in the generated CREATE STATISTICS command
(a decision out of step with the rest of ruleutils.c, but I digress).
Therefore the restored object would be in the right schema, so that the
only problem is that the TOC entry would be mislabeled as to schema. That
could lead to wrong decisions for schema-selective restores, for example.
The ownership issue is a bit more serious: not only was the TOC entry
potentially mislabeled as to owner, but pg_dump didn't bother to issue an
ALTER OWNER command at all, so that after restore the stats object would
continue to be owned by the restoring superuser.
A final point is that decisions as to whether to dump a stats object or
not were driven by whether the underlying table was dumped or not. While
that's not wrong on its face, it won't scale nicely to the planned future
extension to cross-table statistics. Moreover, that design decision comes
out of the view of stats objects as being auxiliary to a particular table,
like a rule or trigger, which is exactly where the above problems came
from. Since we're now treating stats objects more like independent objects
in their own right, they ought to behave like standalone objects for this
purpose too. So change to using the generic selectDumpableObject() logic
for them (which presently amounts to "dump if containing schema is to be
dumped").
Along the way to fixing this, restructure so that getExtendedStatistics
collects the identity info (only) for all extended stats objects in one
query, and then for each object actually being dumped, we retrieve the
definition in dumpStatisticsExt. This is necessary to ensure that
schema-qualification in the generated CREATE STATISTICS command happens
with respect to the search path that pg_dump will now be using at restore
time (ie, the schema the stats object is in, not that of the underlying
table). It's probably also significantly faster in the typical scenario
where only a minority of tables have extended stats.
Back-patch to v10 where extended stats were introduced.
Discussion: https://postgr.es/m/18272.1518328606@sss.pgh.pa.us
2018-02-11 19:24:15 +01:00
|
|
|
strcmp(type, "STATISTICS") == 0 ||
|
2013-08-13 17:45:56 +02:00
|
|
|
/* non-schema-specified objects */
|
|
|
|
strcmp(type, "DATABASE") == 0 ||
|
2007-03-26 18:58:41 +02:00
|
|
|
strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(type, "SCHEMA") == 0 ||
|
2017-07-23 02:20:09 +02:00
|
|
|
strcmp(type, "EVENT TRIGGER") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(type, "FOREIGN DATA WRAPPER") == 0 ||
|
|
|
|
strcmp(type, "SERVER") == 0 ||
|
2017-01-19 18:00:00 +01:00
|
|
|
strcmp(type, "PUBLICATION") == 0 ||
|
|
|
|
strcmp(type, "SUBSCRIPTION") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(type, "USER MAPPING") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
Avoid using unsafe search_path settings during dump and restore.
Historically, pg_dump has "set search_path = foo, pg_catalog" when
dumping an object in schema "foo", and has also caused that setting
to be used while restoring the object. This is problematic because
functions and operators in schema "foo" could capture references meant
to refer to pg_catalog entries, both in the queries issued by pg_dump
and those issued during the subsequent restore run. That could
result in dump/restore misbehavior, or in privilege escalation if a
nefarious user installs trojan-horse functions or operators.
This patch changes pg_dump so that it does not change the search_path
dynamically. The emitted restore script sets the search_path to what
was used at dump time, and then leaves it alone thereafter. Created
objects are placed in the correct schema, regardless of the active
search_path, by dint of schema-qualifying their names in the CREATE
commands, as well as in subsequent ALTER and ALTER-like commands.
Since this change requires a change in the behavior of pg_restore
when processing an archive file made according to this new convention,
bump the archive file version number; old versions of pg_restore will
therefore refuse to process files made with new versions of pg_dump.
Security: CVE-2018-1058
2018-02-26 16:18:21 +01:00
|
|
|
appendPQExpBuffer(buf, "%s ", type);
|
|
|
|
if (te->namespace && *te->namespace)
|
|
|
|
appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
|
|
|
|
appendPQExpBufferStr(buf, fmtId(te->tag));
|
2005-01-11 06:14:13 +01:00
|
|
|
return;
|
|
|
|
}
|
2002-08-18 11:36:26 +02:00
|
|
|
|
2010-02-18 02:29:10 +01:00
|
|
|
/* BLOBs just have a name, but it's numeric so must not use fmtId */
|
|
|
|
if (strcmp(type, "BLOB") == 0)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2004-07-13 05:00:17 +02:00
|
|
|
/*
|
2005-01-11 06:14:13 +01:00
|
|
|
* These object types require additional decoration. Fortunately, the
|
|
|
|
* information needed is exactly what's in the DROP command.
|
2004-07-13 05:00:17 +02:00
|
|
|
*/
|
2005-01-11 06:14:13 +01:00
|
|
|
if (strcmp(type, "AGGREGATE") == 0 ||
|
|
|
|
strcmp(type, "FUNCTION") == 0 ||
|
|
|
|
strcmp(type, "OPERATOR") == 0 ||
|
2007-01-23 18:54:50 +01:00
|
|
|
strcmp(type, "OPERATOR CLASS") == 0 ||
|
2017-11-30 14:46:13 +01:00
|
|
|
strcmp(type, "OPERATOR FAMILY") == 0 ||
|
|
|
|
strcmp(type, "PROCEDURE") == 0)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
2005-01-11 06:14:13 +01:00
|
|
|
/* Chop "DROP " off the front and make a modifiable copy */
|
2011-11-25 21:40:51 +01:00
|
|
|
char *first = pg_strdup(te->dropStmt + 5);
|
2005-01-11 06:14:13 +01:00
|
|
|
char *last;
|
2004-07-13 05:00:17 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
/* point to last character in string */
|
|
|
|
last = first + strlen(first) - 1;
|
2004-07-13 05:00:17 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
/* Strip off any ';' or '\n' at the end */
|
|
|
|
while (last >= first && (*last == '\n' || *last == ';'))
|
|
|
|
last--;
|
|
|
|
*(last + 1) = '\0';
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2005-01-11 06:14:13 +01:00
|
|
|
appendPQExpBufferStr(buf, first);
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2004-07-13 05:00:17 +02:00
|
|
|
free(first);
|
2005-01-11 06:14:13 +01:00
|
|
|
return;
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("don't know how to set owner for object type \"%s\"",
|
2005-01-11 06:14:13 +01:00
|
|
|
type);
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* Emit the SQL commands to create the object represented by a TOC entry
|
|
|
|
*
|
|
|
|
* This now also includes issuing an ALTER OWNER command to restore the
|
|
|
|
* object's ownership, if wanted. But note that the object's permissions
|
|
|
|
* will remain at default, until the matching ACL TOC entry is restored.
|
|
|
|
*/
|
2004-07-13 05:00:17 +02:00
|
|
|
static void
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
_printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
|
2000-07-04 16:25:28 +02:00
|
|
|
{
|
2016-01-13 23:48:33 +01:00
|
|
|
RestoreOptions *ropt = AH->public.ropt;
|
|
|
|
|
2019-03-06 18:54:38 +01:00
|
|
|
/* Select owner, schema, tablespace and default AM as necessary */
|
2004-08-13 23:37:28 +02:00
|
|
|
_becomeOwner(AH, te);
|
|
|
|
_selectOutputSchema(AH, te->namespace);
|
2004-11-06 20:36:02 +01:00
|
|
|
_selectTablespace(AH, te->tablespace);
|
2019-03-06 18:54:38 +01:00
|
|
|
_selectTableAccessMethod(AH, te->tableam);
|
2004-08-13 23:37:28 +02:00
|
|
|
|
|
|
|
/* Emit header comment for item */
|
2004-08-30 21:44:14 +02:00
|
|
|
if (!AH->noTocComments)
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
2004-08-30 21:44:14 +02:00
|
|
|
const char *pfx;
|
2012-02-23 21:53:09 +01:00
|
|
|
char *sanitized_name;
|
|
|
|
char *sanitized_schema;
|
|
|
|
char *sanitized_owner;
|
2004-08-30 21:44:14 +02:00
|
|
|
|
|
|
|
if (isData)
|
|
|
|
pfx = "Data for ";
|
|
|
|
else
|
|
|
|
pfx = "";
|
|
|
|
|
|
|
|
ahprintf(AH, "--\n");
|
|
|
|
if (AH->public.verbose)
|
2003-12-06 04:00:16 +01:00
|
|
|
{
|
2004-08-30 21:44:14 +02:00
|
|
|
ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n",
|
|
|
|
te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
|
|
|
|
if (te->nDeps > 0)
|
|
|
|
{
|
|
|
|
int i;
|
2003-12-06 04:00:16 +01:00
|
|
|
|
2004-08-30 21:44:14 +02:00
|
|
|
ahprintf(AH, "-- Dependencies:");
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
ahprintf(AH, " %d", te->dependencies[i]);
|
|
|
|
ahprintf(AH, "\n");
|
|
|
|
}
|
2003-12-06 04:00:16 +01:00
|
|
|
}
|
2012-02-23 21:53:09 +01:00
|
|
|
|
2019-02-01 15:29:42 +01:00
|
|
|
sanitized_name = sanitize_line(te->tag, false);
|
|
|
|
sanitized_schema = sanitize_line(te->namespace, true);
|
|
|
|
sanitized_owner = sanitize_line(ropt->noOwner ? NULL : te->owner, true);
|
2012-02-23 21:53:09 +01:00
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s",
|
2012-02-23 21:53:09 +01:00
|
|
|
pfx, sanitized_name, te->desc, sanitized_schema,
|
|
|
|
sanitized_owner);
|
|
|
|
|
|
|
|
free(sanitized_name);
|
|
|
|
free(sanitized_schema);
|
|
|
|
free(sanitized_owner);
|
|
|
|
|
2015-05-11 17:45:43 +02:00
|
|
|
if (te->tablespace && strlen(te->tablespace) > 0 && !ropt->noTablespace)
|
2012-02-23 21:53:09 +01:00
|
|
|
{
|
|
|
|
char *sanitized_tablespace;
|
|
|
|
|
2019-02-01 15:29:42 +01:00
|
|
|
sanitized_tablespace = sanitize_line(te->tablespace, false);
|
2012-02-23 21:53:09 +01:00
|
|
|
ahprintf(AH, "; Tablespace: %s", sanitized_tablespace);
|
|
|
|
free(sanitized_tablespace);
|
|
|
|
}
|
2004-11-06 20:36:02 +01:00
|
|
|
ahprintf(AH, "\n");
|
|
|
|
|
2004-08-30 21:44:14 +02:00
|
|
|
if (AH->PrintExtraTocPtr != NULL)
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->PrintExtraTocPtr(AH, te);
|
2004-08-30 21:44:14 +02:00
|
|
|
ahprintf(AH, "--\n\n");
|
2003-12-06 04:00:16 +01:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2004-08-13 23:37:28 +02:00
|
|
|
/*
|
|
|
|
* Actually print the definition.
|
|
|
|
*
|
2005-01-11 06:14:13 +01:00
|
|
|
* Really crude hack for suppressing AUTHORIZATION clause that old pg_dump
|
2021-06-29 03:34:55 +02:00
|
|
|
* versions put into CREATE SCHEMA. Don't mutate the variant for schema
|
|
|
|
* "public" that is a comment. We have to do this when --no-owner mode is
|
|
|
|
* selected. This is ugly, but I see no other good way ...
|
2004-08-13 23:37:28 +02:00
|
|
|
*/
|
2021-06-29 03:34:55 +02:00
|
|
|
if (ropt->noOwner &&
|
|
|
|
strcmp(te->desc, "SCHEMA") == 0 && strncmp(te->defn, "--", 2) != 0)
|
2004-07-13 05:00:17 +02:00
|
|
|
{
|
2005-01-11 06:14:13 +01:00
|
|
|
ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", fmtId(te->tag));
|
2003-09-24 01:31:52 +02:00
|
|
|
}
|
2004-08-13 23:37:28 +02:00
|
|
|
else
|
2003-09-24 01:31:52 +02:00
|
|
|
{
|
2019-04-26 18:03:59 +02:00
|
|
|
if (te->defn && strlen(te->defn) > 0)
|
2004-08-13 23:37:28 +02:00
|
|
|
ahprintf(AH, "%s\n\n", te->defn);
|
2004-07-13 05:00:17 +02:00
|
|
|
}
|
2004-08-13 23:37:28 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we aren't using SET SESSION AUTH to determine ownership, we must
|
2021-06-29 03:34:55 +02:00
|
|
|
* instead issue an ALTER OWNER command. Schema "public" is special; when
|
|
|
|
* a dump emits a comment in lieu of creating it, we use ALTER OWNER even
|
|
|
|
* when using SET SESSION for all other objects. We assume that anything
|
|
|
|
* without a DROP command is not a separately ownable object. All the
|
|
|
|
* categories with DROP commands must appear in one list or the other.
|
2004-08-13 23:37:28 +02:00
|
|
|
*/
|
2021-06-29 03:34:55 +02:00
|
|
|
if (!ropt->noOwner &&
|
|
|
|
(!ropt->use_setsessauth ||
|
|
|
|
(strcmp(te->desc, "SCHEMA") == 0 &&
|
|
|
|
strncmp(te->defn, "--", 2) == 0)) &&
|
2019-04-26 18:03:59 +02:00
|
|
|
te->owner && strlen(te->owner) > 0 &&
|
|
|
|
te->dropStmt && strlen(te->dropStmt) > 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "AGGREGATE") == 0 ||
|
2010-02-18 02:29:10 +01:00
|
|
|
strcmp(te->desc, "BLOB") == 0 ||
|
2011-02-12 14:54:13 +01:00
|
|
|
strcmp(te->desc, "COLLATION") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "CONVERSION") == 0 ||
|
|
|
|
strcmp(te->desc, "DATABASE") == 0 ||
|
|
|
|
strcmp(te->desc, "DOMAIN") == 0 ||
|
|
|
|
strcmp(te->desc, "FUNCTION") == 0 ||
|
|
|
|
strcmp(te->desc, "OPERATOR") == 0 ||
|
|
|
|
strcmp(te->desc, "OPERATOR CLASS") == 0 ||
|
2007-01-23 18:54:50 +01:00
|
|
|
strcmp(te->desc, "OPERATOR FAMILY") == 0 ||
|
2017-11-30 14:46:13 +01:00
|
|
|
strcmp(te->desc, "PROCEDURE") == 0 ||
|
2007-03-26 18:58:41 +02:00
|
|
|
strcmp(te->desc, "PROCEDURAL LANGUAGE") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "SCHEMA") == 0 ||
|
2017-07-23 02:20:09 +02:00
|
|
|
strcmp(te->desc, "EVENT TRIGGER") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "TABLE") == 0 ||
|
|
|
|
strcmp(te->desc, "TYPE") == 0 ||
|
|
|
|
strcmp(te->desc, "VIEW") == 0 ||
|
2013-03-04 01:23:31 +01:00
|
|
|
strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(te->desc, "SEQUENCE") == 0 ||
|
2011-01-02 05:48:11 +01:00
|
|
|
strcmp(te->desc, "FOREIGN TABLE") == 0 ||
|
2007-08-21 03:11:32 +02:00
|
|
|
strcmp(te->desc, "TEXT SEARCH DICTIONARY") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(te->desc, "TEXT SEARCH CONFIGURATION") == 0 ||
|
|
|
|
strcmp(te->desc, "FOREIGN DATA WRAPPER") == 0 ||
|
2017-01-19 18:00:00 +01:00
|
|
|
strcmp(te->desc, "SERVER") == 0 ||
|
Fix assorted errors in pg_dump's handling of extended statistics objects.
pg_dump supposed that a stats object necessarily shares the same schema
as its underlying table, and that it doesn't have a separate owner.
These things may have been true during early development of the feature,
but they are not true as of v10 release.
Failure to track the object's schema separately turns out to have only
limited consequences, because pg_get_statisticsobjdef() always schema-
qualifies the target object name in the generated CREATE STATISTICS command
(a decision out of step with the rest of ruleutils.c, but I digress).
Therefore the restored object would be in the right schema, so that the
only problem is that the TOC entry would be mislabeled as to schema. That
could lead to wrong decisions for schema-selective restores, for example.
The ownership issue is a bit more serious: not only was the TOC entry
potentially mislabeled as to owner, but pg_dump didn't bother to issue an
ALTER OWNER command at all, so that after restore the stats object would
continue to be owned by the restoring superuser.
A final point is that decisions as to whether to dump a stats object or
not were driven by whether the underlying table was dumped or not. While
that's not wrong on its face, it won't scale nicely to the planned future
extension to cross-table statistics. Moreover, that design decision comes
out of the view of stats objects as being auxiliary to a particular table,
like a rule or trigger, which is exactly where the above problems came
from. Since we're now treating stats objects more like independent objects
in their own right, they ought to behave like standalone objects for this
purpose too. So change to using the generic selectDumpableObject() logic
for them (which presently amounts to "dump if containing schema is to be
dumped").
Along the way to fixing this, restructure so that getExtendedStatistics
collects the identity info (only) for all extended stats objects in one
query, and then for each object actually being dumped, we retrieve the
definition in dumpStatisticsExt. This is necessary to ensure that
schema-qualification in the generated CREATE STATISTICS command happens
with respect to the search path that pg_dump will now be using at restore
time (ie, the schema the stats object is in, not that of the underlying
table). It's probably also significantly faster in the typical scenario
where only a minority of tables have extended stats.
Back-patch to v10 where extended stats were introduced.
Discussion: https://postgr.es/m/18272.1518328606@sss.pgh.pa.us
2018-02-11 19:24:15 +01:00
|
|
|
strcmp(te->desc, "STATISTICS") == 0 ||
|
2017-01-19 18:00:00 +01:00
|
|
|
strcmp(te->desc, "PUBLICATION") == 0 ||
|
|
|
|
strcmp(te->desc, "SUBSCRIPTION") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
|
|
|
PQExpBuffer temp = createPQExpBuffer();
|
|
|
|
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(temp, "ALTER ");
|
2020-08-25 07:24:15 +02:00
|
|
|
_getObjectDescription(temp, te);
|
2005-01-11 06:14:13 +01:00
|
|
|
appendPQExpBuffer(temp, " OWNER TO %s;", fmtId(te->owner));
|
|
|
|
ahprintf(AH, "%s\n\n", temp->data);
|
|
|
|
destroyPQExpBuffer(temp);
|
|
|
|
}
|
|
|
|
else if (strcmp(te->desc, "CAST") == 0 ||
|
|
|
|
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
2005-08-22 21:40:37 +02:00
|
|
|
strcmp(te->desc, "CONSTRAINT") == 0 ||
|
Move handling of database properties from pg_dumpall into pg_dump.
This patch rearranges the division of labor between pg_dump and pg_dumpall
so that pg_dump itself handles all properties attached to a single
database. Notably, a database's ACL (GRANT/REVOKE status) and local GUC
settings established by ALTER DATABASE SET and ALTER ROLE IN DATABASE SET
can be dumped and restored by pg_dump. This is a long-requested
improvement.
"pg_dumpall -g" will now produce only role- and tablespace-related output,
nothing about individual databases. The total output of a regular
pg_dumpall run remains the same.
pg_dump (or pg_restore) will restore database-level properties only when
creating the target database with --create. This applies not only to
ACLs and GUCs but to the other database properties it already handled,
that is database comments and security labels. This is more consistent
and useful, but does represent an incompatibility in the behavior seen
without --create.
(This change makes the proposed patch to have pg_dump use "COMMENT ON
DATABASE CURRENT_DATABASE" unnecessary, since there is no case where
the command is issued that we won't know the true name of the database.
We might still want that patch as a feature in its own right, but pg_dump
no longer needs it.)
pg_dumpall with --clean will now drop and recreate the "postgres" and
"template1" databases in the target cluster, allowing their locale and
encoding settings to be changed if necessary, and providing a cleaner
way to set nondefault tablespaces for them than we had before. This
means that such a script must now always be started in the "postgres"
database; the order of drops and reconnects will not work otherwise.
Without --clean, the script will not adjust any database-level properties
of those two databases (including their comments, ACLs, and security
labels, which it formerly would try to set).
Another minor incompatibility is that the CREATE DATABASE commands in a
pg_dumpall script will now always specify locale and encoding settings.
Formerly those would be omitted if they matched the cluster's default.
While that behavior had some usefulness in some migration scenarios,
it also posed a significant hazard of unwanted locale/encoding changes.
To migrate to another locale/encoding, it's now necessary to use pg_dump
without --create to restore into a database with the desired settings.
Commit 4bd371f6f's hack to emit "SET default_transaction_read_only = off"
is gone: we now dodge that problem by the expedient of not issuing ALTER
DATABASE SET commands until after reconnecting to the target database.
Therefore, such settings won't apply during the restore session.
In passing, improve some shaky grammar in the docs, and add a note pointing
out that pg_dumpall's output can't be expected to load without any errors.
(Someday we might want to fix that, but this is not that patch.)
Haribabu Kommi, reviewed at various times by Andreas Karlsson,
Vaishnavi Prabakaran, and Robert Haas; further hacking by me.
Discussion: https://postgr.es/m/CAJrrPGcUurV0eWTeXODwsOYFN=Ekq36t1s0YnFYUNzsmRfdAyA@mail.gmail.com
2018-01-22 20:09:09 +01:00
|
|
|
strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "DEFAULT") == 0 ||
|
|
|
|
strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
2005-08-22 21:40:37 +02:00
|
|
|
strcmp(te->desc, "INDEX") == 0 ||
|
2005-01-11 06:14:13 +01:00
|
|
|
strcmp(te->desc, "RULE") == 0 ||
|
2008-12-19 17:25:19 +01:00
|
|
|
strcmp(te->desc, "TRIGGER") == 0 ||
|
Row-Level Security Policies (RLS)
Building on the updatable security-barrier views work, add the
ability to define policies on tables to limit the set of rows
which are returned from a query and which are allowed to be added
to a table. Expressions defined by the policy for filtering are
added to the security barrier quals of the query, while expressions
defined to check records being added to a table are added to the
with-check options of the query.
New top-level commands are CREATE/ALTER/DROP POLICY and are
controlled by the table owner. Row Security is able to be enabled
and disabled by the owner on a per-table basis using
ALTER TABLE .. ENABLE/DISABLE ROW SECURITY.
Per discussion, ROW SECURITY is disabled on tables by default and
must be enabled for policies on the table to be used. If no
policies exist on a table with ROW SECURITY enabled, a default-deny
policy is used and no records will be visible.
By default, row security is applied at all times except for the
table owner and the superuser. A new GUC, row_security, is added
which can be set to ON, OFF, or FORCE. When set to FORCE, row
security will be applied even for the table owner and superusers.
When set to OFF, row security will be disabled when allowed and an
error will be thrown if the user does not have rights to bypass row
security.
Per discussion, pg_dump sets row_security = OFF by default to ensure
that exports and backups will have all data in the table or will
error if there are insufficient privileges to bypass row security.
A new option has been added to pg_dump, --enable-row-security, to
ask pg_dump to export with row security enabled.
A new role capability, BYPASSRLS, which can only be set by the
superuser, is added to allow other users to be able to bypass row
security using row_security = OFF.
Many thanks to the various individuals who have helped with the
design, particularly Robert Haas for his feedback.
Authors include Craig Ringer, KaiGai Kohei, Adam Brightwell, Dean
Rasheed, with additional changes and rework by me.
Reviewers have included all of the above, Greg Smith,
Jeff McCormick, and Robert Haas.
2014-09-19 17:18:35 +02:00
|
|
|
strcmp(te->desc, "ROW SECURITY") == 0 ||
|
Rename pg_rowsecurity -> pg_policy and other fixes
As pointed out by Robert, we should really have named pg_rowsecurity
pg_policy, as the objects stored in that catalog are policies. This
patch fixes that and updates the column names to start with 'pol' to
match the new catalog name.
The security consideration for COPY with row level security, also
pointed out by Robert, has also been addressed by remembering and
re-checking the OID of the relation initially referenced during COPY
processing, to make sure it hasn't changed under us by the time we
finish planning out the query which has been built.
Robert and Alvaro also commented on missing OCLASS and OBJECT entries
for POLICY (formerly ROWSECURITY or POLICY, depending) in various
places. This patch fixes that too, which also happens to add the
ability to COMMENT on policies.
In passing, attempt to improve the consistency of messages, comments,
and documentation as well. This removes various incarnations of
'row-security', 'row-level security', 'Row-security', etc, in favor
of 'policy', 'row level security' or 'row_security' as appropriate.
Happy Thanksgiving!
2014-11-27 07:06:36 +01:00
|
|
|
strcmp(te->desc, "POLICY") == 0 ||
|
Fix assorted errors in pg_dump's handling of extended statistics objects.
pg_dump supposed that a stats object necessarily shares the same schema
as its underlying table, and that it doesn't have a separate owner.
These things may have been true during early development of the feature,
but they are not true as of v10 release.
Failure to track the object's schema separately turns out to have only
limited consequences, because pg_get_statisticsobjdef() always schema-
qualifies the target object name in the generated CREATE STATISTICS command
(a decision out of step with the rest of ruleutils.c, but I digress).
Therefore the restored object would be in the right schema, so that the
only problem is that the TOC entry would be mislabeled as to schema. That
could lead to wrong decisions for schema-selective restores, for example.
The ownership issue is a bit more serious: not only was the TOC entry
potentially mislabeled as to owner, but pg_dump didn't bother to issue an
ALTER OWNER command at all, so that after restore the stats object would
continue to be owned by the restoring superuser.
A final point is that decisions as to whether to dump a stats object or
not were driven by whether the underlying table was dumped or not. While
that's not wrong on its face, it won't scale nicely to the planned future
extension to cross-table statistics. Moreover, that design decision comes
out of the view of stats objects as being auxiliary to a particular table,
like a rule or trigger, which is exactly where the above problems came
from. Since we're now treating stats objects more like independent objects
in their own right, they ought to behave like standalone objects for this
purpose too. So change to using the generic selectDumpableObject() logic
for them (which presently amounts to "dump if containing schema is to be
dumped").
Along the way to fixing this, restructure so that getExtendedStatistics
collects the identity info (only) for all extended stats objects in one
query, and then for each object actually being dumped, we retrieve the
definition in dumpStatisticsExt. This is necessary to ensure that
schema-qualification in the generated CREATE STATISTICS command happens
with respect to the search path that pg_dump will now be using at restore
time (ie, the schema the stats object is in, not that of the underlying
table). It's probably also significantly faster in the typical scenario
where only a minority of tables have extended stats.
Back-patch to v10 where extended stats were introduced.
Discussion: https://postgr.es/m/18272.1518328606@sss.pgh.pa.us
2018-02-11 19:24:15 +01:00
|
|
|
strcmp(te->desc, "USER MAPPING") == 0)
|
2005-01-11 06:14:13 +01:00
|
|
|
{
|
|
|
|
/* these object types don't have separate owners */
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("don't know how to set owner for object type \"%s\"",
|
2005-01-11 06:14:13 +01:00
|
|
|
te->desc);
|
|
|
|
}
|
2003-09-24 01:31:52 +02:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2004-07-19 23:02:17 +02:00
|
|
|
/*
|
|
|
|
* If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
|
|
|
|
* commands, so we can no longer assume we know the current auth setting.
|
|
|
|
*/
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
if (_tocEntryIsACL(te))
|
2004-07-19 23:02:17 +02:00
|
|
|
{
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
|
|
|
AH->currUser = NULL;
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
2012-02-23 21:53:09 +01:00
|
|
|
/*
 * Sanitize a string for inclusion in an SQL comment or TOC listing.
 *
 * Any newline or carriage-return characters are flattened to spaces, so
 * that one logical output line is guaranteed to occupy exactly one
 * physical line.  Without this, maliciously crafted object names could
 * corrupt the dump output (in the worst case creating an SQL injection
 * hazard for anyone incautiously restoring it).
 *
 * Returns a freshly malloc'd string.  A NULL input yields a malloc'ed
 * empty string, or a malloc'ed "-" if want_hyphen is set.
 *
 * Note that names are not quoted here, so the name fields of a TOC
 * listing aren't automatically parseable.  "pg_restore -L" is unaffected
 * since it looks only at the dumpId field, but someday we might want to
 * try harder.
 */
static char *
sanitize_line(const char *str, bool want_hyphen)
{
	char	   *copy;
	char	   *p;

	if (str == NULL)
		return pg_strdup(want_hyphen ? "-" : "");

	copy = pg_strdup(str);

	for (p = copy; *p; p++)
	{
		if (*p == '\r' || *p == '\n')
			*p = ' ';
	}

	return copy;
}
|
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
 * Write the file header for a custom-format archive.
 *
 * IMPORTANT: the field order and widths written here define the archive
 * header format; they must stay in sync with what ReadHead() expects to
 * read back (and, presumably, with _discoverArchiveFormat() — see the
 * NB comment in ReadHead).
 */
void
WriteHead(ArchiveHandle *AH)
{
	struct tm	crtm;

	AH->WriteBufPtr(AH, "PGDMP", 5);	/* Magic code */
	/* Archive version number, one byte per component */
	AH->WriteBytePtr(AH, ARCHIVE_MAJOR(AH->version));
	AH->WriteBytePtr(AH, ARCHIVE_MINOR(AH->version));
	AH->WriteBytePtr(AH, ARCHIVE_REV(AH->version));
	/* Sizes of integer and file-offset fields used elsewhere in the file */
	AH->WriteBytePtr(AH, AH->intSize);
	AH->WriteBytePtr(AH, AH->offSize);
	/* Archive format code; ReadHead cross-checks this against AH->format */
	AH->WriteBytePtr(AH, AH->format);
	WriteInt(AH, AH->compression);
	/*
	 * Record the archive creation time as broken-down local-time fields.
	 * ReadHead reconstructs a time_t from these via mktime().
	 */
	crtm = *localtime(&AH->createDate);
	WriteInt(AH, crtm.tm_sec);
	WriteInt(AH, crtm.tm_min);
	WriteInt(AH, crtm.tm_hour);
	WriteInt(AH, crtm.tm_mday);
	WriteInt(AH, crtm.tm_mon);
	WriteInt(AH, crtm.tm_year);
	WriteInt(AH, crtm.tm_isdst);
	/* Name of the database that was dumped */
	WriteStr(AH, PQdb(AH->connection));
	/* Server and pg_dump version strings */
	WriteStr(AH, AH->public.remoteVersionStr);
	WriteStr(AH, PG_VERSION);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ReadHead(ArchiveHandle *AH)
|
|
|
|
{
|
2021-04-01 19:34:16 +02:00
|
|
|
char vmaj,
|
|
|
|
vmin,
|
|
|
|
vrev;
|
2000-07-21 13:40:08 +02:00
|
|
|
int fmt;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2010-06-28 04:07:02 +02:00
|
|
|
/*
|
|
|
|
* If we haven't already read the header, do so.
|
|
|
|
*
|
|
|
|
* NB: this code must agree with _discoverArchiveFormat(). Maybe find a
|
|
|
|
* way to unify the cases?
|
|
|
|
*/
|
2000-07-06 20:39:39 +02:00
|
|
|
if (!AH->readHeader)
|
|
|
|
{
|
2021-04-01 19:34:16 +02:00
|
|
|
char tmpMag[7];
|
2016-10-25 18:00:00 +02:00
|
|
|
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->ReadBufPtr(AH, tmpMag, 5);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (strncmp(tmpMag, "PGDMP", 5) != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("did not find magic string in file header");
|
2021-04-01 19:34:16 +02:00
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
vmaj = AH->ReadBytePtr(AH);
|
|
|
|
vmin = AH->ReadBytePtr(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
if (vmaj > 1 || (vmaj == 1 && vmin > 0)) /* Version > 1.0 */
|
|
|
|
vrev = AH->ReadBytePtr(AH);
|
|
|
|
else
|
|
|
|
vrev = 0;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
|
|
|
|
fatal("unsupported version (%d.%d) in file header",
|
|
|
|
vmaj, vmin);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
AH->intSize = AH->ReadBytePtr(AH);
|
|
|
|
if (AH->intSize > 32)
|
|
|
|
fatal("sanity check on integer size (%lu) failed",
|
|
|
|
(unsigned long) AH->intSize);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
if (AH->intSize > sizeof(int))
|
|
|
|
pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
if (AH->version >= K_VERS_1_7)
|
|
|
|
AH->offSize = AH->ReadBytePtr(AH);
|
|
|
|
else
|
|
|
|
AH->offSize = AH->intSize;
|
2002-10-22 21:15:23 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
fmt = AH->ReadBytePtr(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2021-04-01 19:34:16 +02:00
|
|
|
if (AH->format != fmt)
|
|
|
|
fatal("expected format (%d) differs from format found in file (%d)",
|
|
|
|
AH->format, fmt);
|
2000-07-04 16:25:28 +02:00
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_2)
|
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version < K_VERS_1_4)
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->compression = AH->ReadBytePtr(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
|
|
|
AH->compression = ReadInt(AH);
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
else
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->compression = Z_DEFAULT_COMPRESSION;
|
2000-07-04 16:25:28 +02:00
|
|
|
|
2000-07-06 20:39:39 +02:00
|
|
|
#ifndef HAVE_LIBZ
|
|
|
|
if (AH->compression != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_warning("archive is compressed, but this installation does not support compression -- no data will be available");
|
2000-07-04 16:25:28 +02:00
|
|
|
#endif
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version >= K_VERS_1_4)
|
|
|
|
{
|
Work around portability issue with newer versions of mktime().
Recent glibc versions have made mktime() fail if tm_isdst is
inconsistent with the prevailing timezone; in particular it fails for
tm_isdst = 1 when the zone is UTC. (This seems wildly inconsistent
with the POSIX-mandated treatment of "incorrect" values for the other
fields of struct tm, so if you ask me it's a bug, but I bet they'll
say it's intentional.) This has been observed to cause cosmetic
problems when pg_restore'ing an archive created in a different
timezone.
To fix, do mktime() using the field values from the archive, and if
that fails try again with tm_isdst = -1. This will give a result
that's off by the UTC-offset difference from the original zone, but
that was true before, too. It's not terribly critical since we don't
do anything with the result except possibly print it. (Someday we
should flush this entire bit of logic and record a standard-format
timestamp in the archive instead. That's not okay for a back-patched
bug fix, though.)
Also, guard our only other use of mktime() by having initdb's
build_time_t() set tm_isdst = -1 not 0. This case could only have
an issue in zones that are DST year-round; but I think some do exist,
or could in future.
Per report from Wells Oliver. Back-patch to all supported
versions, since any of them might need to run with a newer glibc.
Discussion: https://postgr.es/m/CAOC+FBWDhDHO7G-i1_n_hjRzCnUeFO+H-Czi1y10mFhRWpBrew@mail.gmail.com
2021-06-13 20:32:42 +02:00
|
|
|
struct tm crtm;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
crtm.tm_sec = ReadInt(AH);
|
|
|
|
crtm.tm_min = ReadInt(AH);
|
|
|
|
crtm.tm_hour = ReadInt(AH);
|
|
|
|
crtm.tm_mday = ReadInt(AH);
|
|
|
|
crtm.tm_mon = ReadInt(AH);
|
|
|
|
crtm.tm_year = ReadInt(AH);
|
|
|
|
crtm.tm_isdst = ReadInt(AH);
|
|
|
|
|
Work around portability issue with newer versions of mktime().
Recent glibc versions have made mktime() fail if tm_isdst is
inconsistent with the prevailing timezone; in particular it fails for
tm_isdst = 1 when the zone is UTC. (This seems wildly inconsistent
with the POSIX-mandated treatment of "incorrect" values for the other
fields of struct tm, so if you ask me it's a bug, but I bet they'll
say it's intentional.) This has been observed to cause cosmetic
problems when pg_restore'ing an archive created in a different
timezone.
To fix, do mktime() using the field values from the archive, and if
that fails try again with tm_isdst = -1. This will give a result
that's off by the UTC-offset difference from the original zone, but
that was true before, too. It's not terribly critical since we don't
do anything with the result except possibly print it. (Someday we
should flush this entire bit of logic and record a standard-format
timestamp in the archive instead. That's not okay for a back-patched
bug fix, though.)
Also, guard our only other use of mktime() by having initdb's
build_time_t() set tm_isdst = -1 not 0. This case could only have
an issue in zones that are DST year-round; but I think some do exist,
or could in future.
Per report from Wells Oliver. Back-patch to all supported
versions, since any of them might need to run with a newer glibc.
Discussion: https://postgr.es/m/CAOC+FBWDhDHO7G-i1_n_hjRzCnUeFO+H-Czi1y10mFhRWpBrew@mail.gmail.com
2021-06-13 20:32:42 +02:00
|
|
|
/*
|
|
|
|
* Newer versions of glibc have mktime() report failure if tm_isdst is
|
|
|
|
* inconsistent with the prevailing timezone, e.g. tm_isdst = 1 when
|
|
|
|
* TZ=UTC. This is problematic when restoring an archive under a
|
|
|
|
* different timezone setting. If we get a failure, try again with
|
|
|
|
* tm_isdst set to -1 ("don't know").
|
|
|
|
*
|
|
|
|
* XXX with or without this hack, we reconstruct createDate
|
|
|
|
* incorrectly when the prevailing timezone is different from
|
|
|
|
* pg_dump's. Next time we bump the archive version, we should flush
|
|
|
|
* this representation and store a plain seconds-since-the-Epoch
|
|
|
|
* timestamp instead.
|
|
|
|
*/
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->createDate = mktime(&crtm);
|
|
|
|
if (AH->createDate == (time_t) -1)
|
Work around portability issue with newer versions of mktime().
Recent glibc versions have made mktime() fail if tm_isdst is
inconsistent with the prevailing timezone; in particular it fails for
tm_isdst = 1 when the zone is UTC. (This seems wildly inconsistent
with the POSIX-mandated treatment of "incorrect" values for the other
fields of struct tm, so if you ask me it's a bug, but I bet they'll
say it's intentional.) This has been observed to cause cosmetic
problems when pg_restore'ing an archive created in a different
timezone.
To fix, do mktime() using the field values from the archive, and if
that fails try again with tm_isdst = -1. This will give a result
that's off by the UTC-offset difference from the original zone, but
that was true before, too. It's not terribly critical since we don't
do anything with the result except possibly print it. (Someday we
should flush this entire bit of logic and record a standard-format
timestamp in the archive instead. That's not okay for a back-patched
bug fix, though.)
Also, guard our only other use of mktime() by having initdb's
build_time_t() set tm_isdst = -1 not 0. This case could only have
an issue in zones that are DST year-round; but I think some do exist,
or could in future.
Per report from Wells Oliver. Back-patch to all supported
versions, since any of them might need to run with a newer glibc.
Discussion: https://postgr.es/m/CAOC+FBWDhDHO7G-i1_n_hjRzCnUeFO+H-Czi1y10mFhRWpBrew@mail.gmail.com
2021-06-13 20:32:42 +02:00
|
|
|
{
|
|
|
|
crtm.tm_isdst = -1;
|
|
|
|
AH->createDate = mktime(&crtm);
|
|
|
|
if (AH->createDate == (time_t) -1)
|
|
|
|
pg_log_warning("invalid creation date in header");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (AH->version >= K_VERS_1_4)
|
|
|
|
{
|
|
|
|
AH->archdbname = ReadStr(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
2004-11-06 20:36:02 +01:00
|
|
|
if (AH->version >= K_VERS_1_10)
|
|
|
|
{
|
|
|
|
AH->archiveRemoteVersion = ReadStr(AH);
|
|
|
|
AH->archiveDumpVersion = ReadStr(AH);
|
|
|
|
}
|
2000-07-04 16:25:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-10-25 03:33:17 +02:00
|
|
|
/*
|
|
|
|
* checkSeek
|
2010-06-28 04:07:02 +02:00
|
|
|
* check to see if ftell/fseek can be performed.
|
2002-10-25 03:33:17 +02:00
|
|
|
*/
|
|
|
|
bool
|
|
|
|
checkSeek(FILE *fp)
|
|
|
|
{
|
2010-06-28 04:07:02 +02:00
|
|
|
pgoff_t tpos;
|
|
|
|
|
|
|
|
/* Check that ftello works on this file */
|
|
|
|
tpos = ftello(fp);
|
2014-02-10 00:28:14 +01:00
|
|
|
if (tpos < 0)
|
2010-06-28 04:07:02 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
|
|
|
|
* this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
|
|
|
|
* successful no-op even on files that are otherwise unseekable.
|
|
|
|
*/
|
|
|
|
if (fseeko(fp, tpos, SEEK_SET) != 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
2002-10-25 03:33:17 +02:00
|
|
|
}
|
2005-04-15 18:40:36 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dumpTimestamp
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
|
|
|
|
{
|
2014-09-06 01:22:31 +02:00
|
|
|
char buf[64];
|
2005-04-15 18:40:36 +02:00
|
|
|
|
2014-10-27 01:59:21 +01:00
|
|
|
if (strftime(buf, sizeof(buf), PGDUMP_STRFTIME_FMT, localtime(&tim)) != 0)
|
2005-04-15 18:40:36 +02:00
|
|
|
ahprintf(AH, "-- %s %s\n\n", msg, buf);
|
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Main engine for parallel restore.
|
|
|
|
*
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
* Parallel restore is done in three phases. In this first phase,
|
|
|
|
* we'll process all SECTION_PRE_DATA TOC entries that are allowed to be
|
|
|
|
* processed in the RESTORE_PASS_MAIN pass. (In practice, that's all
|
|
|
|
* PRE_DATA items other than ACLs.) Entries we can't process now are
|
|
|
|
* added to the pending_list for later phases to deal with.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
static void
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2011-02-18 19:11:45 +01:00
|
|
|
bool skipped_some;
|
2009-02-02 21:07:37 +01:00
|
|
|
TocEntry *next_work_item;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("entering restore_toc_entries_prefork");
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* Adjust dependency information */
|
|
|
|
fix_dependencies(AH);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do all the early stuff in a single connection in the parent. There's no
|
|
|
|
* great point in running it in parallel, in fact it will actually run
|
|
|
|
* faster in a single connection because we avoid all the connection and
|
2013-03-24 16:27:20 +01:00
|
|
|
* setup overhead. Also, pre-9.2 pg_dump versions were not very good
|
Make pg_dump emit more accurate dependency information.
While pg_dump has included dependency information in archive-format output
ever since 7.3, it never made any large effort to ensure that that
information was actually useful. In particular, in common situations where
dependency chains include objects that aren't separately emitted in the
dump, the dependencies shown for objects that were emitted would reference
the dump IDs of these un-dumped objects, leaving no clue about which other
objects the visible objects indirectly depend on. So far, parallel
pg_restore has managed to avoid tripping over this misfeature, but only
by dint of some crude hacks like not trusting dependency information in
the pre-data section of the archive.
It seems prudent to do something about this before it rises up to bite us,
so instead of emitting the "raw" dependencies of each dumped object,
recursively search for its actual dependencies among the subset of objects
that are being dumped.
Back-patch to 9.2, since that code hasn't yet diverged materially from
HEAD. At some point we might need to back-patch further, but right now
there are no known cases where this is actively necessary. (The one known
case, bug #6699, is fixed in a different way by my previous patch.) Since
this patch depends on 9.2 changes that made TOC entries be marked before
output commences as to whether they'll be dumped, back-patching further
would require additional surgery; and as of now there's no evidence that
it's worth the risk.
2012-06-26 03:20:24 +02:00
|
|
|
* about showing all the dependencies of SECTION_PRE_DATA items, so we do
|
|
|
|
* not risk trying to process them out-of-order.
|
|
|
|
*
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
* Stuff that we can't do immediately gets added to the pending_list.
|
|
|
|
* Note: we don't yet filter out entries that aren't going to be restored.
|
|
|
|
* They might participate in dependency chains connecting entries that
|
|
|
|
* should be restored, so we treat them as live until we actually process
|
|
|
|
* them.
|
|
|
|
*
|
Make pg_dump emit more accurate dependency information.
While pg_dump has included dependency information in archive-format output
ever since 7.3, it never made any large effort to ensure that that
information was actually useful. In particular, in common situations where
dependency chains include objects that aren't separately emitted in the
dump, the dependencies shown for objects that were emitted would reference
the dump IDs of these un-dumped objects, leaving no clue about which other
objects the visible objects indirectly depend on. So far, parallel
pg_restore has managed to avoid tripping over this misfeature, but only
by dint of some crude hacks like not trusting dependency information in
the pre-data section of the archive.
It seems prudent to do something about this before it rises up to bite us,
so instead of emitting the "raw" dependencies of each dumped object,
recursively search for its actual dependencies among the subset of objects
that are being dumped.
Back-patch to 9.2, since that code hasn't yet diverged materially from
HEAD. At some point we might need to back-patch further, but right now
there are no known cases where this is actively necessary. (The one known
case, bug #6699, is fixed in a different way by my previous patch.) Since
this patch depends on 9.2 changes that made TOC entries be marked before
output commences as to whether they'll be dumped, back-patching further
would require additional surgery; and as of now there's no evidence that
it's worth the risk.
2012-06-26 03:20:24 +02:00
|
|
|
* Note: as of 9.2, it should be guaranteed that all PRE_DATA items appear
|
|
|
|
* before DATA items, and all DATA items before POST_DATA items. That is
|
2017-08-19 19:39:37 +02:00
|
|
|
* not certain to be true in older archives, though, and in any case use
|
|
|
|
* of a list file would destroy that ordering (cf. SortTocFromFile). So
|
|
|
|
* this loop cannot assume that it holds.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
AH->restorePass = RESTORE_PASS_MAIN;
|
2011-02-18 19:11:45 +01:00
|
|
|
skipped_some = false;
|
2009-08-08 00:48:34 +02:00
|
|
|
for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
bool do_now = true;
|
|
|
|
|
2011-02-18 19:11:45 +01:00
|
|
|
if (next_work_item->section != SECTION_PRE_DATA)
|
|
|
|
{
|
|
|
|
/* DATA and POST_DATA items are just ignored for now */
|
|
|
|
if (next_work_item->section == SECTION_DATA ||
|
|
|
|
next_work_item->section == SECTION_POST_DATA)
|
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
do_now = false;
|
2011-02-18 19:11:45 +01:00
|
|
|
skipped_some = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* SECTION_NONE items, such as comments, can be processed now
|
|
|
|
* if we are still in the PRE_DATA part of the archive. Once
|
|
|
|
* we've skipped any items, we have to consider whether the
|
|
|
|
* comment's dependencies are satisfied, so skip it for now.
|
|
|
|
*/
|
|
|
|
if (skipped_some)
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
do_now = false;
|
2011-02-18 19:11:45 +01:00
|
|
|
}
|
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* Also skip items that need to be forced into later passes. We need
|
|
|
|
* not set skipped_some in this case, since by assumption no main-pass
|
|
|
|
* items could depend on these.
|
|
|
|
*/
|
|
|
|
if (_tocEntryRestorePass(next_work_item) != RESTORE_PASS_MAIN)
|
|
|
|
do_now = false;
|
|
|
|
|
|
|
|
if (do_now)
|
|
|
|
{
|
|
|
|
/* OK, restore the item and update its dependencies */
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("processing item %d %s %s",
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
next_work_item->dumpId,
|
|
|
|
next_work_item->desc, next_work_item->tag);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
(void) restore_toc_entry(AH, next_work_item, false);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2017-08-19 19:39:37 +02:00
|
|
|
/* Reduce dependencies, but don't move anything to ready_list */
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
reduce_dependencies(AH, next_work_item, NULL);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Nope, so add it to pending_list */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
pending_list_append(pending_list, next_work_item);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now close parent connection in prep for parallel steps. We do this
|
|
|
|
* mainly to ensure that we don't exceed the specified number of parallel
|
|
|
|
* connections.
|
|
|
|
*/
|
2012-02-16 17:49:20 +01:00
|
|
|
DisconnectDatabase(&AH->public);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* blow away any transient state from the old connection */
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
|
|
|
AH->currUser = NULL;
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
|
|
|
AH->currSchema = NULL;
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
|
|
|
AH->currTablespace = NULL;
|
2019-03-06 18:54:38 +01:00
|
|
|
if (AH->currTableAm)
|
|
|
|
free(AH->currTableAm);
|
|
|
|
AH->currTableAm = NULL;
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Main engine for parallel restore.
|
|
|
|
*
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
* Parallel restore is done in three phases. In this second phase,
|
|
|
|
* we process entries by dispatching them to parallel worker children
|
|
|
|
* (processes on Unix, threads on Windows), each of which connects
|
|
|
|
* separately to the database. Inter-entry dependencies are respected,
|
|
|
|
* and so is the RestorePass multi-pass structure. When we can no longer
|
|
|
|
* make any entries ready to process, we exit. Normally, there will be
|
|
|
|
* nothing left to do; but if there is, the third phase will mop up.
|
2013-03-24 16:27:20 +01:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
|
|
|
|
TocEntry *pending_list)
|
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ParallelReadyList ready_list;
|
2013-03-24 16:27:20 +01:00
|
|
|
TocEntry *next_work_item;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("entering restore_toc_entries_parallel");
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* Set up ready_list with enough room for all known TocEntrys */
|
|
|
|
ready_list_init(&ready_list, AH->tocCount);
|
|
|
|
|
2009-08-08 00:48:34 +02:00
|
|
|
/*
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
* The pending_list contains all items that we need to restore. Move all
|
|
|
|
* items that are available to process immediately into the ready_list.
|
|
|
|
* After this setup, the pending list is everything that needs to be done
|
|
|
|
* but is blocked by one or more dependencies, while the ready list
|
|
|
|
* contains items that have no remaining dependencies and are OK to
|
|
|
|
* process in the current restore pass.
|
2009-08-08 00:48:34 +02:00
|
|
|
*/
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
AH->restorePass = RESTORE_PASS_MAIN;
|
|
|
|
move_to_ready_list(pending_list, &ready_list, AH->restorePass);
|
2009-08-08 00:48:34 +02:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* main parent loop
|
|
|
|
*
|
|
|
|
* Keep going until there is no worker still running AND there is no work
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
* left to be done. Note invariant: at top of loop, there should always
|
|
|
|
* be at least one worker available to dispatch a job to.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("entering main parallel loop");
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
for (;;)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/* Look for an item ready to be dispatched to a worker */
|
2020-08-25 07:24:15 +02:00
|
|
|
next_work_item = pop_next_work_item(&ready_list, pstate);
|
2009-02-02 21:07:37 +01:00
|
|
|
if (next_work_item != NULL)
|
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
/* If not to be restored, don't waste time launching a worker */
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("skipping item %d %s %s",
|
2009-02-02 21:07:37 +01:00
|
|
|
next_work_item->dumpId,
|
|
|
|
next_work_item->desc, next_work_item->tag);
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* Update its dependencies as though we'd completed it */
|
2009-08-08 00:48:34 +02:00
|
|
|
reduce_dependencies(AH, next_work_item, &ready_list);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/* Loop around to see if anything else can be dispatched */
|
2009-02-02 21:07:37 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("launching item %d %s %s",
|
2013-03-24 16:27:20 +01:00
|
|
|
next_work_item->dumpId,
|
|
|
|
next_work_item->desc, next_work_item->tag);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* Dispatch to some worker */
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verbosity, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
|
|
|
|
mark_restore_job_done, &ready_list);
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
else if (IsEveryWorkerIdle(pstate))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Nothing is ready and no worker is running, so we're done with
|
|
|
|
* the current pass or maybe with the whole process.
|
|
|
|
*/
|
|
|
|
if (AH->restorePass == RESTORE_PASS_LAST)
|
|
|
|
break; /* No more parallel processing is possible */
|
|
|
|
|
|
|
|
/* Advance to next restore pass */
|
|
|
|
AH->restorePass++;
|
|
|
|
/* That probably allows some stuff to be made ready */
|
|
|
|
move_to_ready_list(pending_list, &ready_list, AH->restorePass);
|
|
|
|
/* Loop around to see if anything's now ready */
|
|
|
|
continue;
|
|
|
|
}
|
2013-03-24 16:27:20 +01:00
|
|
|
else
|
2013-03-26 03:52:28 +01:00
|
|
|
{
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* We have nothing ready, but at least one child is working, so
|
|
|
|
* wait for some subjob to finish.
|
|
|
|
*/
|
2013-03-26 03:52:28 +01:00
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verbosity, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
/*
|
|
|
|
* Before dispatching another job, check to see if anything has
|
|
|
|
* finished. We should check every time through the loop so as to
|
|
|
|
* reduce dependencies as soon as possible. If we were unable to
|
|
|
|
* dispatch any job this time through, wait until some worker finishes
|
|
|
|
* (and, hopefully, unblocks some pending item). If we did dispatch
|
|
|
|
* something, continue as soon as there's at least one idle worker.
|
|
|
|
* Note that in either case, there's guaranteed to be at least one
|
|
|
|
* idle worker when we return to the top of the loop. This ensures we
|
|
|
|
* won't block inside DispatchJobForTocEntry, which would be
|
|
|
|
* undesirable: we'd rather postpone dispatching until we see what's
|
|
|
|
* been unblocked by finished jobs.
|
|
|
|
*/
|
|
|
|
WaitForWorkers(AH, pstate,
|
|
|
|
next_work_item ? WFW_ONE_IDLE : WFW_GOT_STATUS);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/* There should now be nothing in ready_list. */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
Assert(ready_list.first_te > ready_list.last_te);
|
|
|
|
|
|
|
|
ready_list_free(&ready_list);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("finished main parallel loop");
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
 * Main engine for parallel restore.
 *
 * Parallel restore is done in three phases.  In this third phase,
 * we mop up any remaining TOC entries by processing them serially.
 * This phase normally should have nothing to do, but if we've somehow
 * gotten stuck due to circular dependencies or some such, this provides
 * at least some chance of completing the restore successfully.
 *
 * AH is the archive handle; pending_list is the header of the circular
 * list (linked via pending_next/pending_prev) of TOC entries that the
 * parallel phase was unable to schedule.
 */
static void
restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
{
	RestoreOptions *ropt = AH->public.ropt;
	TocEntry   *te;

	pg_log_debug("entering restore_toc_entries_postfork");

	/*
	 * Now reconnect the single parent connection.
	 */
	ConnectDatabase((Archive *) AH, &ropt->cparams, true);

	/* re-establish fixed state */
	_doSetFixedOutputState(AH);

	/*
	 * Make sure there is no work left due to, say, circular dependencies, or
	 * some other pathological condition. If so, do it in the single parent
	 * connection. We don't sweat about RestorePass ordering; it's likely we
	 * already violated that.
	 */
	for (te = pending_list->pending_next; te != pending_list; te = te->pending_next)
	{
		pg_log_info("processing missed item %d %s %s",
					te->dumpId, te->desc, te->tag);
		/* Discard the per-entry error count; errors are tallied in AH. */
		(void) restore_toc_entry(AH, te, false);
	}
}
|
|
|
|
|
2009-04-12 23:02:44 +02:00
|
|
|
/*
|
|
|
|
* Check if te1 has an exclusive lock requirement for an item that te2 also
|
|
|
|
* requires, whether or not te2's requirement is for an exclusive lock.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
has_lock_conflicts(TocEntry *te1, TocEntry *te2)
|
|
|
|
{
|
|
|
|
int j,
|
|
|
|
k;
|
|
|
|
|
|
|
|
for (j = 0; j < te1->nLockDeps; j++)
|
|
|
|
{
|
|
|
|
for (k = 0; k < te2->nDeps; k++)
|
|
|
|
{
|
|
|
|
if (te1->lockDeps[j] == te2->dependencies[k])
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-08-08 00:48:34 +02:00
|
|
|
/*
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
* Initialize the header of the pending-items list.
|
2009-08-08 00:48:34 +02:00
|
|
|
*
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
* This is a circular list with a dummy TocEntry as header, just like the
|
2009-08-08 00:48:34 +02:00
|
|
|
* main TOC list; but we use separate list links so that an entry can be in
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
* the main TOC list as well as in the pending list.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pending_list_header_init(TocEntry *l)
|
|
|
|
{
|
|
|
|
l->pending_prev = l->pending_next = l;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Append te to the end of the pending-list headed by l */
|
|
|
|
static void
|
|
|
|
pending_list_append(TocEntry *l, TocEntry *te)
|
|
|
|
{
|
|
|
|
te->pending_prev = l->pending_prev;
|
|
|
|
l->pending_prev->pending_next = te;
|
|
|
|
l->pending_prev = te;
|
|
|
|
te->pending_next = l;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove te from the pending-list */
|
|
|
|
static void
|
|
|
|
pending_list_remove(TocEntry *te)
|
|
|
|
{
|
|
|
|
te->pending_prev->pending_next = te->pending_next;
|
|
|
|
te->pending_next->pending_prev = te->pending_prev;
|
|
|
|
te->pending_prev = NULL;
|
|
|
|
te->pending_next = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the ready_list with enough room for up to tocCount entries.
|
2009-08-08 00:48:34 +02:00
|
|
|
*/
|
|
|
|
static void
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list_init(ParallelReadyList *ready_list, int tocCount)
|
2009-08-08 00:48:34 +02:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list->tes = (TocEntry **)
|
|
|
|
pg_malloc(tocCount * sizeof(TocEntry *));
|
|
|
|
ready_list->first_te = 0;
|
|
|
|
ready_list->last_te = -1;
|
|
|
|
ready_list->sorted = false;
|
2009-08-08 00:48:34 +02:00
|
|
|
}
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/*
|
|
|
|
* Free storage for a ready_list.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
ready_list_free(ParallelReadyList *ready_list)
|
|
|
|
{
|
|
|
|
pg_free(ready_list->tes);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add te to the ready_list */
|
2009-08-08 00:48:34 +02:00
|
|
|
static void
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list_insert(ParallelReadyList *ready_list, TocEntry *te)
|
2009-08-08 00:48:34 +02:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list->tes[++ready_list->last_te] = te;
|
|
|
|
/* List is (probably) not sorted anymore. */
|
|
|
|
ready_list->sorted = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove the i'th entry in the ready_list */
|
|
|
|
static void
|
|
|
|
ready_list_remove(ParallelReadyList *ready_list, int i)
|
|
|
|
{
|
|
|
|
int f = ready_list->first_te;
|
|
|
|
|
|
|
|
Assert(i >= f && i <= ready_list->last_te);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In the typical case where the item to be removed is the first ready
|
|
|
|
* entry, we need only increment first_te to remove it. Otherwise, move
|
|
|
|
* the entries before it to compact the list. (This preserves sortedness,
|
|
|
|
* if any.) We could alternatively move the entries after i, but there
|
|
|
|
* are typically many more of those.
|
|
|
|
*/
|
|
|
|
if (i > f)
|
|
|
|
{
|
|
|
|
TocEntry **first_te_ptr = &ready_list->tes[f];
|
|
|
|
|
|
|
|
memmove(first_te_ptr + 1, first_te_ptr, (i - f) * sizeof(TocEntry *));
|
|
|
|
}
|
|
|
|
ready_list->first_te++;
|
2009-08-08 00:48:34 +02:00
|
|
|
}
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* Sort the ready_list into the desired order */
|
2009-08-08 00:48:34 +02:00
|
|
|
static void
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list_sort(ParallelReadyList *ready_list)
|
2009-08-08 00:48:34 +02:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
if (!ready_list->sorted)
|
|
|
|
{
|
|
|
|
int n = ready_list->last_te - ready_list->first_te + 1;
|
|
|
|
|
|
|
|
if (n > 1)
|
|
|
|
qsort(ready_list->tes + ready_list->first_te, n,
|
|
|
|
sizeof(TocEntry *),
|
|
|
|
TocEntrySizeCompare);
|
|
|
|
ready_list->sorted = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* qsort comparator for sorting TocEntries by dataLength */
|
|
|
|
static int
|
|
|
|
TocEntrySizeCompare(const void *p1, const void *p2)
|
|
|
|
{
|
|
|
|
const TocEntry *te1 = *(const TocEntry *const *) p1;
|
|
|
|
const TocEntry *te2 = *(const TocEntry *const *) p2;
|
|
|
|
|
|
|
|
/* Sort by decreasing dataLength */
|
|
|
|
if (te1->dataLength > te2->dataLength)
|
|
|
|
return -1;
|
|
|
|
if (te1->dataLength < te2->dataLength)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* For equal dataLengths, sort by dumpId, just to be stable */
|
|
|
|
if (te1->dumpId < te2->dumpId)
|
|
|
|
return -1;
|
|
|
|
if (te1->dumpId > te2->dumpId)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
2009-08-08 00:48:34 +02:00
|
|
|
}
|
|
|
|
|
2009-04-12 23:02:44 +02:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/*
|
|
|
|
* Move all immediately-ready items from pending_list to ready_list.
|
|
|
|
*
|
|
|
|
* Items are considered ready if they have no remaining dependencies and
|
|
|
|
* they belong in the current restore pass. (See also reduce_dependencies,
|
|
|
|
* which applies the same logic one-at-a-time.)
|
|
|
|
*/
|
|
|
|
static void
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
move_to_ready_list(TocEntry *pending_list,
|
|
|
|
ParallelReadyList *ready_list,
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
RestorePass pass)
|
|
|
|
{
|
|
|
|
TocEntry *te;
|
|
|
|
TocEntry *next_te;
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
for (te = pending_list->pending_next; te != pending_list; te = next_te)
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/* must save list link before possibly removing te from list */
|
|
|
|
next_te = te->pending_next;
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
|
|
|
|
if (te->depCount == 0 &&
|
|
|
|
_tocEntryRestorePass(te) == pass)
|
|
|
|
{
|
|
|
|
/* Remove it from pending_list ... */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
pending_list_remove(te);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
/* ... and add to ready_list */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list_insert(ready_list, te);
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
 * Find the next work item (if any) that is capable of being run now,
 * and remove it from the ready_list.
 *
 * Returns the item, or NULL if nothing is runnable.
 *
 * To qualify, the item must have no remaining dependencies
 * and no requirements for locks that are incompatible with
 * items currently running.  Items in the ready_list are known to have
 * no remaining dependencies, but we have to check for lock conflicts.
 */
static TocEntry *
pop_next_work_item(ParallelReadyList *ready_list,
				   ParallelState *pstate)
{
	/*
	 * Sort the ready_list so that we'll tackle larger jobs first.
	 */
	ready_list_sort(ready_list);

	/*
	 * Search the ready_list until we find a suitable item.
	 */
	for (int i = ready_list->first_te; i <= ready_list->last_te; i++)
	{
		TocEntry   *te = ready_list->tes[i];
		bool		conflicts = false;

		/*
		 * Check to see if the item would need exclusive lock on something
		 * that a currently running item also needs lock on, or vice versa. If
		 * so, we don't want to schedule them together.
		 */
		for (int k = 0; k < pstate->numWorkers; k++)
		{
			TocEntry   *running_te = pstate->te[k];

			if (running_te == NULL)
				continue;		/* idle worker slot, nothing to conflict with */
			if (has_lock_conflicts(te, running_te) ||
				has_lock_conflicts(running_te, te))
			{
				conflicts = true;
				break;
			}
		}

		if (conflicts)
			continue;

		/* passed all tests, so this item can run */
		ready_list_remove(ready_list, i);
		return te;
	}

	pg_log_debug("no item ready");
	return NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore a single TOC item in parallel with others
|
|
|
|
*
|
2013-03-24 16:27:20 +01:00
|
|
|
* this is run in the worker, i.e. in a thread (Windows) or a separate process
|
|
|
|
* (everything else). A worker process executes several such work items during
|
|
|
|
* a parallel backup or restore. Once we terminate here and report back that
|
2020-06-14 23:22:47 +02:00
|
|
|
* our work is finished, the leader process will assign us a new work item.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
int
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
parallel_restore(ArchiveHandle *AH, TocEntry *te)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2013-03-24 16:27:20 +01:00
|
|
|
int status;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
Assert(AH->connection != NULL);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2016-06-01 22:14:21 +02:00
|
|
|
/* Count only errors associated with this TOC entry */
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->public.n_errors = 0;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/* Restore the TOC item */
|
2016-01-13 23:48:33 +01:00
|
|
|
status = restore_toc_entry(AH, te, true);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
return status;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2020-06-14 23:22:47 +02:00
|
|
|
* Callback function that's invoked in the leader process after a step has
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
* been parallel restored.
|
2009-02-02 21:07:37 +01:00
|
|
|
*
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
* Update status and reduce the dependency count of any dependent items.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
static void
|
Redesign parallel dump/restore's wait-for-workers logic.
The ListenToWorkers/ReapWorkerStatus APIs were messy and hard to use.
Instead, make DispatchJobForTocEntry register a callback function that
will take care of state cleanup, doing whatever had been done by the caller
of ReapWorkerStatus in the old design. (This callback is essentially just
the old mark_work_done function in the restore case, and a trivial test for
worker failure in the dump case.) Then we can have ListenToWorkers call
the callback immediately on receipt of a status message, and return the
worker to WRKR_IDLE state; so the WRKR_FINISHED state goes away.
This allows us to design a unified wait-for-worker-messages loop:
WaitForWorkers replaces EnsureIdleWorker and EnsureWorkersFinished as well
as the mess in restore_toc_entries_parallel. Also, we no longer need the
fragile API spec that the caller of DispatchJobForTocEntry is responsible
for ensuring there's an idle worker, since DispatchJobForTocEntry can just
wait until there is one.
In passing, I got rid of the ParallelArgs struct, which was a net negative
in terms of notational verboseness, and didn't seem to be providing any
noticeable amount of abstraction either.
Tom Lane, reviewed by Kevin Grittner
Discussion: <1188.1464544443@sss.pgh.pa.us>
2016-09-27 19:22:39 +02:00
|
|
|
mark_restore_job_done(ArchiveHandle *AH,
|
|
|
|
TocEntry *te,
|
|
|
|
int status,
|
|
|
|
void *callback_data)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ParallelReadyList *ready_list = (ParallelReadyList *) callback_data;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("finished item %d %s %s",
|
2009-02-02 21:07:37 +01:00
|
|
|
te->dumpId, te->desc, te->tag);
|
|
|
|
|
|
|
|
if (status == WORKER_CREATE_DONE)
|
|
|
|
mark_create_done(AH, te);
|
|
|
|
else if (status == WORKER_INHIBIT_DATA)
|
|
|
|
{
|
|
|
|
inhibit_data_for_failed_table(AH, te);
|
|
|
|
AH->public.n_errors++;
|
|
|
|
}
|
|
|
|
else if (status == WORKER_IGNORED_ERRORS)
|
|
|
|
AH->public.n_errors++;
|
|
|
|
else if (status != 0)
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
fatal("worker process failed: exit code %d",
|
2012-03-20 22:38:11 +01:00
|
|
|
status);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2009-08-08 00:48:34 +02:00
|
|
|
reduce_dependencies(AH, te, ready_list);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process the dependency information into a form useful for parallel restore.
|
|
|
|
*
|
2010-12-09 19:03:11 +01:00
|
|
|
* This function takes care of fixing up some missing or badly designed
|
|
|
|
* dependencies, and then prepares subsidiary data structures that will be
|
|
|
|
* used in the main parallel-restore logic, including:
|
2012-05-29 02:38:28 +02:00
|
|
|
* 1. We build the revDeps[] arrays of incoming dependency dumpIds.
|
|
|
|
* 2. We set up depCount fields that are the number of as-yet-unprocessed
|
2009-02-02 21:07:37 +01:00
|
|
|
* dependencies for each TOC entry.
|
|
|
|
*
|
|
|
|
* We also identify locking dependencies so that we can avoid trying to
|
|
|
|
* schedule conflicting items at the same time.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
fix_dependencies(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
TocEntry *te;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
2012-05-29 02:38:28 +02:00
|
|
|
* Initialize the depCount/revDeps/nRevDeps fields, and make sure the TOC
|
|
|
|
* items are marked as not being in any parallel-processing list.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
te->depCount = te->nDeps;
|
2010-12-09 19:03:11 +01:00
|
|
|
te->revDeps = NULL;
|
|
|
|
te->nRevDeps = 0;
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
te->pending_prev = NULL;
|
|
|
|
te->pending_next = NULL;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* POST_DATA items that are shown as depending on a table need to be
|
|
|
|
* re-pointed to depend on that table's data, instead. This ensures they
|
2012-05-29 02:38:28 +02:00
|
|
|
* won't get scheduled until the data has been loaded.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
2012-05-29 02:38:28 +02:00
|
|
|
repoint_table_dependencies(AH);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB
|
|
|
|
* COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only
|
|
|
|
* one BLOB COMMENTS in such files.)
|
|
|
|
*/
|
|
|
|
if (AH->version < K_VERS_1_11)
|
|
|
|
{
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0)
|
|
|
|
{
|
|
|
|
TocEntry *te2;
|
|
|
|
|
|
|
|
for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next)
|
|
|
|
{
|
|
|
|
if (strcmp(te2->desc, "BLOBS") == 0)
|
|
|
|
{
|
2011-11-25 21:40:51 +01:00
|
|
|
te->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
|
2009-02-02 21:07:37 +01:00
|
|
|
te->dependencies[0] = te2->dumpId;
|
|
|
|
te->nDeps++;
|
|
|
|
te->depCount++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-12-09 19:03:11 +01:00
|
|
|
* At this point we start to build the revDeps reverse-dependency arrays,
|
|
|
|
* so all changes of dependencies must be complete.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Count the incoming dependencies for each item. Also, it is possible
|
Make pg_dump emit more accurate dependency information.
While pg_dump has included dependency information in archive-format output
ever since 7.3, it never made any large effort to ensure that that
information was actually useful. In particular, in common situations where
dependency chains include objects that aren't separately emitted in the
dump, the dependencies shown for objects that were emitted would reference
the dump IDs of these un-dumped objects, leaving no clue about which other
objects the visible objects indirectly depend on. So far, parallel
pg_restore has managed to avoid tripping over this misfeature, but only
by dint of some crude hacks like not trusting dependency information in
the pre-data section of the archive.
It seems prudent to do something about this before it rises up to bite us,
so instead of emitting the "raw" dependencies of each dumped object,
recursively search for its actual dependencies among the subset of objects
that are being dumped.
Back-patch to 9.2, since that code hasn't yet diverged materially from
HEAD. At some point we might need to back-patch further, but right now
there are no known cases where this is actively necessary. (The one known
case, bug #6699, is fixed in a different way by my previous patch.) Since
this patch depends on 9.2 changes that made TOC entries be marked before
output commences as to whether they'll be dumped, back-patching further
would require additional surgery; and as of now there's no evidence that
it's worth the risk.
2012-06-26 03:20:24 +02:00
|
|
|
* that the dependencies list items that are not in the archive at all
|
2013-03-24 16:27:20 +01:00
|
|
|
* (that should not happen in 9.2 and later, but is highly likely in older
|
|
|
|
* archives). Subtract such items from the depCounts.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
{
|
2010-01-19 19:39:19 +01:00
|
|
|
DumpId depid = te->dependencies[i];
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
|
|
|
|
AH->tocsByDumpId[depid]->nRevDeps++;
|
2010-12-09 19:03:11 +01:00
|
|
|
else
|
2009-02-02 21:07:37 +01:00
|
|
|
te->depCount--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-12-09 19:03:11 +01:00
|
|
|
/*
|
|
|
|
* Allocate space for revDeps[] arrays, and reset nRevDeps so we can use
|
|
|
|
* it as a counter below.
|
|
|
|
*/
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
if (te->nRevDeps > 0)
|
2011-11-25 21:40:51 +01:00
|
|
|
te->revDeps = (DumpId *) pg_malloc(te->nRevDeps * sizeof(DumpId));
|
2010-12-09 19:03:11 +01:00
|
|
|
te->nRevDeps = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Build the revDeps[] arrays of incoming-dependency dumpIds. This had
|
|
|
|
* better agree with the loops above.
|
|
|
|
*/
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
{
|
|
|
|
DumpId depid = te->dependencies[i];
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
|
2010-12-09 19:03:11 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
TocEntry *otherte = AH->tocsByDumpId[depid];
|
2010-12-09 19:03:11 +01:00
|
|
|
|
|
|
|
otherte->revDeps[otherte->nRevDeps++] = te->dumpId;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* Lastly, work out the locking dependencies.
|
|
|
|
*/
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
te->lockDeps = NULL;
|
|
|
|
te->nLockDeps = 0;
|
2012-05-29 02:38:28 +02:00
|
|
|
identify_locking_dependencies(AH, te);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2012-05-29 02:38:28 +02:00
|
|
|
* Change dependencies on table items to depend on table data items instead,
|
2009-02-02 21:07:37 +01:00
|
|
|
* but only in POST_DATA items.
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
*
|
|
|
|
* Also, for any item having such dependency(s), set its dataLength to the
|
|
|
|
* largest dataLength of the table data items it depends on. This ensures
|
|
|
|
* that parallel restore will prioritize larger jobs (index builds, FK
|
|
|
|
* constraint checks, etc) over smaller ones, avoiding situations where we
|
|
|
|
* end a restore with only one active job working on a large table.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
static void
|
2012-05-29 02:38:28 +02:00
|
|
|
repoint_table_dependencies(ArchiveHandle *AH)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
TocEntry *te;
|
|
|
|
int i;
|
2012-05-29 02:38:28 +02:00
|
|
|
DumpId olddep;
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
if (te->section != SECTION_POST_DATA)
|
|
|
|
continue;
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
olddep = te->dependencies[i];
|
|
|
|
if (olddep <= AH->maxDumpId &&
|
|
|
|
AH->tableDataId[olddep] != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
DumpId tabledataid = AH->tableDataId[olddep];
|
|
|
|
TocEntry *tabledatate = AH->tocsByDumpId[tabledataid];
|
|
|
|
|
|
|
|
te->dependencies[i] = tabledataid;
|
|
|
|
te->dataLength = Max(te->dataLength, tabledatate->dataLength);
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("transferring dependency %d -> %d to %d",
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
te->dumpId, olddep, tabledataid);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Identify which objects we'll need exclusive lock on in order to restore
|
|
|
|
* the given TOC entry (*other* than the one identified by the TOC entry
|
|
|
|
* itself). Record their dump IDs in the entry's lockDeps[] array.
|
|
|
|
*/
|
|
|
|
static void
|
2012-05-29 02:38:28 +02:00
|
|
|
identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
DumpId *lockids;
|
|
|
|
int nlockids;
|
|
|
|
int i;
|
|
|
|
|
2018-08-29 01:46:59 +02:00
|
|
|
/*
|
|
|
|
* We only care about this for POST_DATA items. PRE_DATA items are not
|
|
|
|
* run in parallel, and DATA items are all independent by assumption.
|
|
|
|
*/
|
|
|
|
if (te->section != SECTION_POST_DATA)
|
|
|
|
return;
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/* Quick exit if no dependencies at all */
|
|
|
|
if (te->nDeps == 0)
|
|
|
|
return;
|
|
|
|
|
2018-08-29 01:46:59 +02:00
|
|
|
/*
|
|
|
|
* Most POST_DATA items are ALTER TABLEs or some moral equivalent of that,
|
|
|
|
* and hence require exclusive lock. However, we know that CREATE INDEX
|
|
|
|
* does not. (Maybe someday index-creating CONSTRAINTs will fall in that
|
|
|
|
* category too ... but today is not that day.)
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "INDEX") == 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
2014-09-26 17:21:35 +02:00
|
|
|
* We assume the entry requires exclusive lock on each TABLE or TABLE DATA
|
|
|
|
* item listed among its dependencies. Originally all of these would have
|
|
|
|
* been TABLE items, but repoint_table_dependencies would have repointed
|
|
|
|
* them to the TABLE DATA items if those are present (which they might not
|
|
|
|
* be, eg in a schema-only dump). Note that all of the entries we are
|
|
|
|
* processing here are POST_DATA; otherwise there might be a significant
|
|
|
|
* difference between a dependency on a table and a dependency on its
|
|
|
|
* data, so that closer analysis would be needed here.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
2011-11-25 21:40:51 +01:00
|
|
|
lockids = (DumpId *) pg_malloc(te->nDeps * sizeof(DumpId));
|
2009-02-02 21:07:37 +01:00
|
|
|
nlockids = 0;
|
|
|
|
for (i = 0; i < te->nDeps; i++)
|
|
|
|
{
|
|
|
|
DumpId depid = te->dependencies[i];
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL &&
|
2014-09-26 17:21:35 +02:00
|
|
|
((strcmp(AH->tocsByDumpId[depid]->desc, "TABLE DATA") == 0) ||
|
|
|
|
strcmp(AH->tocsByDumpId[depid]->desc, "TABLE") == 0))
|
2009-02-02 21:07:37 +01:00
|
|
|
lockids[nlockids++] = depid;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nlockids == 0)
|
|
|
|
{
|
|
|
|
free(lockids);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-11-30 02:41:06 +01:00
|
|
|
te->lockDeps = pg_realloc(lockids, nlockids * sizeof(DumpId));
|
2009-02-02 21:07:37 +01:00
|
|
|
te->nLockDeps = nlockids;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove the specified TOC entry from the depCounts of items that depend on
|
2009-08-08 00:48:34 +02:00
|
|
|
* it, thereby possibly making them ready-to-run. Any pending item that
|
2017-08-19 19:39:37 +02:00
|
|
|
* becomes ready should be moved to the ready_list, if that's provided.
|
2009-02-02 21:07:37 +01:00
|
|
|
*/
|
|
|
|
static void
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
|
|
|
|
ParallelReadyList *ready_list)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("reducing dependencies for %d", te->dumpId);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2010-12-09 19:03:11 +01:00
|
|
|
for (i = 0; i < te->nRevDeps; i++)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
TocEntry *otherte = AH->tocsByDumpId[te->revDeps[i]];
|
2010-12-09 19:03:11 +01:00
|
|
|
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
Assert(otherte->depCount > 0);
|
2010-12-09 19:03:11 +01:00
|
|
|
otherte->depCount--;
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
|
|
|
|
/*
|
2017-08-19 19:39:37 +02:00
|
|
|
* It's ready if it has no remaining dependencies, and it belongs in
|
|
|
|
* the current restore pass, and it is currently a member of the
|
|
|
|
* pending list (that check is needed to prevent double restore in
|
|
|
|
* some cases where a list-file forces out-of-order restoring).
|
|
|
|
* However, if ready_list == NULL then caller doesn't want any list
|
|
|
|
* memberships changed.
|
Fix pg_dump/pg_restore to emit REFRESH MATERIALIZED VIEW commands last.
Because we push all ACL (i.e. GRANT/REVOKE) restore steps to the end,
materialized view refreshes were occurring while the permissions on
referenced objects were still at defaults. This led to failures if,
say, an MV owned by user A reads from a table owned by user B, even
if B had granted the necessary privileges to A. We've had multiple
complaints about that type of restore failure, most recently from
Jordan Gigov.
The ideal fix for this would be to start treating ACLs as dependency-
sortable objects, rather than hard-wiring anything about their dump order
(the existing approach is a messy kluge dating to commit dc0e76ca3).
But that's going to be a rather major change, and it certainly wouldn't
lead to a back-patchable fix. As a short-term solution, convert the
existing two-pass hack (ie, normal objects then ACLs) to a three-pass hack,
ie, normal objects then ACLs then matview refreshes. Because this happens
in RestoreArchive(), it will also fix the problem when restoring from an
existing archive-format dump.
(Note this means that if a matview refresh would have failed under the
permissions prevailing at dump time, it'll fail during restore as well.
We'll define that as user error rather than something we should try
to work around.)
To avoid performance loss in parallel restore, we need the matview
refreshes to still be parallelizable. Hence, clean things up enough
so that both ACLs and matviews are handled by the parallel restore
infrastructure, instead of reverting back to serial restore for ACLs.
There is still a final serial step, but it shouldn't normally have to
do anything; it's only there to try to recover if we get stuck due to
some problem like unresolved circular dependencies.
Patch by me, but it owes something to an earlier attempt by Kevin Grittner.
Back-patch to 9.3 where materialized views were introduced.
Discussion: https://postgr.es/m/28572.1500912583@sss.pgh.pa.us
2017-08-03 23:36:23 +02:00
|
|
|
*/
|
|
|
|
if (otherte->depCount == 0 &&
|
|
|
|
_tocEntryRestorePass(otherte) == AH->restorePass &&
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
otherte->pending_prev != NULL &&
|
2017-08-19 19:39:37 +02:00
|
|
|
ready_list != NULL)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2017-08-19 19:39:37 +02:00
|
|
|
/* Remove it from pending list ... */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
pending_list_remove(otherte);
|
2010-12-09 19:03:11 +01:00
|
|
|
/* ... and add to ready_list */
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
ready_list_insert(ready_list, otherte);
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the created flag on the DATA member corresponding to the given
|
|
|
|
* TABLE member
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
mark_create_done(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
if (AH->tableDataId[te->dumpId] != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
2012-05-29 02:38:28 +02:00
|
|
|
TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
|
|
|
|
|
|
|
|
ted->created = true;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark the DATA member corresponding to the given TABLE member
|
|
|
|
* as not wanted
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_info("table \"%s\" could not be created, will not restore its data",
|
2009-02-02 21:07:37 +01:00
|
|
|
te->tag);
|
|
|
|
|
2012-05-29 02:38:28 +02:00
|
|
|
if (AH->tableDataId[te->dumpId] != 0)
|
2009-02-02 21:07:37 +01:00
|
|
|
{
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
|
|
|
|
|
|
|
|
ted->reqs = 0;
|
2009-02-02 21:07:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clone and de-clone routines used in parallel restoration.
|
|
|
|
*
|
|
|
|
* Enough of the structure is cloned to ensure that there is no
|
|
|
|
* conflict between different threads each with their own clone.
|
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
ArchiveHandle *
|
2009-02-02 21:07:37 +01:00
|
|
|
CloneArchive(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
ArchiveHandle *clone;
|
|
|
|
|
|
|
|
/* Make a "flat" copy */
|
2011-11-25 21:40:51 +01:00
|
|
|
clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle));
|
2009-02-02 21:07:37 +01:00
|
|
|
memcpy(clone, AH, sizeof(ArchiveHandle));
|
|
|
|
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
/* Handle format-independent fields */
|
|
|
|
memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse));
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* The clone will have its own connection, so disregard connection state */
|
|
|
|
clone->connection = NULL;
|
Redesign handling of SIGTERM/control-C in parallel pg_dump/pg_restore.
Formerly, Unix builds of pg_dump/pg_restore would trap SIGINT and similar
signals and set a flag that was tested in various data-transfer loops.
This was prone to errors of omission (cf commit 3c8aa6654); and even if
the client-side response was prompt, we did nothing that would cause
long-running SQL commands (e.g. CREATE INDEX) to terminate early.
Also, the master process would effectively do nothing at all upon receipt
of SIGINT; the only reason it seemed to work was that in typical scenarios
the signal would also be delivered to the child processes. We should
support termination when a signal is delivered only to the master process,
though.
Windows builds had no console interrupt handler, so they would just fall
over immediately at control-C, again leaving long-running SQL commands to
finish unmolested.
To fix, remove the flag-checking approach altogether. Instead, allow the
Unix signal handler to send a cancel request directly and then exit(1).
In the master process, also have it forward the signal to the children.
On Windows, add a console interrupt handler that behaves approximately
the same. The main difference is that a single execution of the Windows
handler can send all the cancel requests since all the info is available
in one process, whereas on Unix each process sends a cancel only for its
own database connection.
In passing, fix an old problem that DisconnectDatabase tends to send a
cancel request before exiting a parallel worker, even if nothing went
wrong. This is at least a waste of cycles, and could lead to unexpected
log messages, or maybe even data loss if it happened in pg_restore (though
in the current code the problem seems to affect only pg_dump). The cause
was that after a COPY step, pg_dump was leaving libpq in PGASYNC_BUSY
state, causing PQtransactionStatus() to report PQTRANS_ACTIVE. That's
normally harmless because the next PQexec() will silently clear the
PGASYNC_BUSY state; but in a parallel worker we might exit without any
additional SQL commands after a COPY step. So add an extra PQgetResult()
call after a COPY to allow libpq to return to PGASYNC_IDLE state.
This is a bug fix, IMO, so back-patch to 9.3 where parallel dump/restore
were introduced.
Thanks to Kyotaro Horiguchi for Windows testing and code suggestions.
Original-Patch: <7005.1464657274@sss.pgh.pa.us>
Discussion: <20160602.174941.256342236.horiguchi.kyotaro@lab.ntt.co.jp>
2016-06-02 19:27:53 +02:00
|
|
|
clone->connCancel = NULL;
|
2009-02-02 21:07:37 +01:00
|
|
|
clone->currUser = NULL;
|
|
|
|
clone->currSchema = NULL;
|
|
|
|
clone->currTablespace = NULL;
|
|
|
|
|
|
|
|
/* savedPassword must be local in case we change it while connecting */
|
|
|
|
if (clone->savedPassword)
|
2011-11-25 21:40:51 +01:00
|
|
|
clone->savedPassword = pg_strdup(clone->savedPassword);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* clone has its own error count, too */
|
|
|
|
clone->public.n_errors = 0;
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
* Connect our new clone object to the database, using the same connection
|
|
|
|
* parameters used for the original connection.
|
2013-03-24 16:27:20 +01:00
|
|
|
*/
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
ConnectDatabase((Archive *) clone, &clone->public.ropt->cparams, true);
|
2016-06-01 22:14:21 +02:00
|
|
|
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
/* re-establish fixed state */
|
|
|
|
if (AH->mode == archModeRead)
|
2016-06-01 22:14:21 +02:00
|
|
|
_doSetFixedOutputState(clone);
|
Fix handling of -d "connection string" in pg_dump/pg_restore.
Parallel pg_dump failed if its -d parameter was a connection string
containing any essential information other than host, port, or username.
The same was true for pg_restore with --create.
The reason is that these scenarios failed to preserve the connection
string from the command line; the code felt free to replace that with
just the database name when reconnecting from a pg_dump parallel worker
or after creating the target database. By chance, parallel pg_restore
did not suffer this defect, as long as you didn't say --create.
In practice it seems that the error would be obvious only if the
connstring included essential, non-default SSL or GSS parameters.
This may explain why it took us so long to notice. (It also makes
it very difficult to craft a regression test case illustrating the
problem, since the test would fail in builds without those options.)
Fix by refactoring so that ConnectDatabase always receives all the
relevant options directly from the command line, rather than
reconstructed values. Inject a different database name, when necessary,
by relying on libpq's rules for handling multiple "dbname" parameters.
While here, let's get rid of the essentially duplicate _connectDB
function, as well as some obsolete nearby cruft.
Per bug #16604 from Zsolt Ero. Back-patch to all supported branches.
Discussion: https://postgr.es/m/16604-933f4b8791227b15@postgresql.org
2020-09-25 00:19:38 +02:00
|
|
|
/* in write case, setupDumpWorker will fix up connection state */
|
2013-03-24 16:27:20 +01:00
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/* Let the format-specific code have a chance too */
|
2017-09-07 18:06:23 +02:00
|
|
|
clone->ClonePtr(clone);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
Assert(clone->connection != NULL);
|
2009-02-02 21:07:37 +01:00
|
|
|
return clone;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release clone-local storage.
|
|
|
|
*
|
|
|
|
* Note: we assume any clone-local connection was already closed.
|
|
|
|
*/
|
2013-03-24 16:27:20 +01:00
|
|
|
void
|
2009-02-02 21:07:37 +01:00
|
|
|
DeCloneArchive(ArchiveHandle *AH)
|
|
|
|
{
|
Redesign handling of SIGTERM/control-C in parallel pg_dump/pg_restore.
Formerly, Unix builds of pg_dump/pg_restore would trap SIGINT and similar
signals and set a flag that was tested in various data-transfer loops.
This was prone to errors of omission (cf commit 3c8aa6654); and even if
the client-side response was prompt, we did nothing that would cause
long-running SQL commands (e.g. CREATE INDEX) to terminate early.
Also, the master process would effectively do nothing at all upon receipt
of SIGINT; the only reason it seemed to work was that in typical scenarios
the signal would also be delivered to the child processes. We should
support termination when a signal is delivered only to the master process,
though.
Windows builds had no console interrupt handler, so they would just fall
over immediately at control-C, again leaving long-running SQL commands to
finish unmolested.
To fix, remove the flag-checking approach altogether. Instead, allow the
Unix signal handler to send a cancel request directly and then exit(1).
In the master process, also have it forward the signal to the children.
On Windows, add a console interrupt handler that behaves approximately
the same. The main difference is that a single execution of the Windows
handler can send all the cancel requests since all the info is available
in one process, whereas on Unix each process sends a cancel only for its
own database connection.
In passing, fix an old problem that DisconnectDatabase tends to send a
cancel request before exiting a parallel worker, even if nothing went
wrong. This is at least a waste of cycles, and could lead to unexpected
log messages, or maybe even data loss if it happened in pg_restore (though
in the current code the problem seems to affect only pg_dump). The cause
was that after a COPY step, pg_dump was leaving libpq in PGASYNC_BUSY
state, causing PQtransactionStatus() to report PQTRANS_ACTIVE. That's
normally harmless because the next PQexec() will silently clear the
PGASYNC_BUSY state; but in a parallel worker we might exit without any
additional SQL commands after a COPY step. So add an extra PQgetResult()
call after a COPY to allow libpq to return to PGASYNC_IDLE state.
This is a bug fix, IMO, so back-patch to 9.3 where parallel dump/restore
were introduced.
Thanks to Kyotaro Horiguchi for Windows testing and code suggestions.
Original-Patch: <7005.1464657274@sss.pgh.pa.us>
Discussion: <20160602.174941.256342236.horiguchi.kyotaro@lab.ntt.co.jp>
2016-06-02 19:27:53 +02:00
|
|
|
/* Should not have an open database connection */
|
|
|
|
Assert(AH->connection == NULL);
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/* Clear format-specific state */
|
2017-09-07 18:06:23 +02:00
|
|
|
AH->DeClonePtr(AH);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
/* Clear state allocated by CloneArchive */
|
|
|
|
if (AH->sqlparse.curCmd)
|
|
|
|
destroyPQExpBuffer(AH->sqlparse.curCmd);
|
2009-02-02 21:07:37 +01:00
|
|
|
|
|
|
|
/* Clear any connection-local state */
|
|
|
|
if (AH->currUser)
|
|
|
|
free(AH->currUser);
|
|
|
|
if (AH->currSchema)
|
|
|
|
free(AH->currSchema);
|
|
|
|
if (AH->currTablespace)
|
|
|
|
free(AH->currTablespace);
|
2019-03-06 18:54:38 +01:00
|
|
|
if (AH->currTableAm)
|
|
|
|
free(AH->currTableAm);
|
2009-02-02 21:07:37 +01:00
|
|
|
if (AH->savedPassword)
|
|
|
|
free(AH->savedPassword);
|
|
|
|
|
|
|
|
free(AH);
|
|
|
|
}
|