2000-07-21 13:40:08 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* pg_backup_custom.c
|
|
|
|
*
|
|
|
|
* Implements the custom output format.
|
|
|
|
*
|
2018-03-29 19:10:04 +02:00
|
|
|
* The comments with the routines in this code are a good place to
|
2000-07-21 13:40:08 +02:00
|
|
|
* understand how to write a new format.
|
|
|
|
*
|
|
|
|
* See the headers to pg_restore for more details.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2000, Philip Warner
|
|
|
|
* Rights are granted to use this software in any way so long
|
|
|
|
* as this notice is not removed.
|
|
|
|
*
|
|
|
|
 * The author is not responsible for loss or damages that may
 * result from the use of this software, and any liability
 * will be limited to the time taken to fix any
 * related bug.
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/bin/pg_dump/pg_backup_custom.c
|
2001-01-12 05:32:07 +01:00
|
|
|
*
|
2000-07-21 13:40:08 +02:00
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "postgres_fe.h"
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "common/file_utils.h"
|
2010-12-02 20:39:03 +01:00
|
|
|
#include "compress_io.h"
|
2013-03-24 16:27:20 +01:00
|
|
|
#include "parallel.h"
|
2013-03-27 17:10:40 +01:00
|
|
|
#include "pg_backup_utils.h"
|
2001-02-10 03:31:31 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*--------
|
|
|
|
* Routines in the format interface
|
|
|
|
*--------
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _StartData(ArchiveHandle *AH, TocEntry *te);
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
2000-07-21 13:40:08 +02:00
|
|
|
static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static int _WriteByte(ArchiveHandle *AH, const int i);
|
2022-09-23 01:41:23 +02:00
|
|
|
static int _ReadByte(ArchiveHandle *AH);
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
|
|
static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
|
2016-01-13 23:48:33 +01:00
|
|
|
static void _CloseArchive(ArchiveHandle *AH);
|
2009-02-02 21:07:37 +01:00
|
|
|
static void _ReopenArchive(ArchiveHandle *AH);
|
2016-01-13 23:48:33 +01:00
|
|
|
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te);
|
2000-07-21 13:40:08 +02:00
|
|
|
static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
static void _PrintData(ArchiveHandle *AH);
|
|
|
|
static void _skipData(ArchiveHandle *AH);
|
2022-12-05 08:52:11 +01:00
|
|
|
static void _skipLOs(ArchiveHandle *AH);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
|
|
static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
|
|
static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _LoadLOs(ArchiveHandle *AH, bool drop);
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
|
|
|
|
static void _PrepParallelRestore(ArchiveHandle *AH);
|
2009-02-02 21:07:37 +01:00
|
|
|
static void _Clone(ArchiveHandle *AH);
|
|
|
|
static void _DeClone(ArchiveHandle *AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
static int _WorkerJobRestoreCustom(ArchiveHandle *AH, TocEntry *te);
|
2013-03-24 16:27:20 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
 * Private per-archive state for the custom format, hung off AH->formatData.
 */
typedef struct
{
	CompressorState *cs;		/* compressor state while writing a data
								 * block; reset between blocks */
	int			hasSeek;		/* result of checkSeek() on the archive file */
	/* lastFilePos is used only when reading, and may be invalid if !hasSeek */
	pgoff_t		lastFilePos;	/* position after last data block we've read */
} lclContext;
|
|
|
|
|
|
|
|
/*
 * Private per-TOC-entry state: records where (if anywhere) the entry's
 * data block lives in the archive file.
 */
typedef struct
{
	int			dataState;		/* one of the K_OFFSET_* codes */
	pgoff_t		dataPos;		/* valid only if dataState=K_OFFSET_POS_SET */
} lclTocEntry;
|
|
|
|
|
|
|
|
|
|
|
|
/*------
|
|
|
|
* Static declarations
|
|
|
|
*------
|
|
|
|
*/
|
|
|
|
static void _readBlockHeader(ArchiveHandle *AH, int *type, int *id);
|
2007-02-19 16:05:06 +01:00
|
|
|
static pgoff_t _getFilePos(ArchiveHandle *AH, lclContext *ctx);
|
2010-12-02 20:39:03 +01:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _CustomWriteFunc(ArchiveHandle *AH, const char *buf, size_t len);
|
2010-12-02 20:39:03 +01:00
|
|
|
static size_t _CustomReadFunc(ArchiveHandle *AH, char **buf, size_t *buflen);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Init routine required by ALL formats. This is a global routine
|
|
|
|
* and should be declared in pg_backup_archiver.h
|
|
|
|
*
|
|
|
|
 * Its task is to create any extra archive context (using AH->formatData),
|
|
|
|
* and to initialize the supported function pointers.
|
|
|
|
*
|
2023-02-09 06:43:53 +01:00
|
|
|
* It should also prepare whatever its input source is for reading/writing,
|
2000-07-21 13:40:08 +02:00
|
|
|
* and in the case of a read mode connection, it should load the Header & TOC.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
InitArchiveFmt_Custom(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/* Assuming static functions, this can be copied for each format. */
|
|
|
|
AH->ArchiveEntryPtr = _ArchiveEntry;
|
|
|
|
AH->StartDataPtr = _StartData;
|
|
|
|
AH->WriteDataPtr = _WriteData;
|
|
|
|
AH->EndDataPtr = _EndData;
|
|
|
|
AH->WriteBytePtr = _WriteByte;
|
|
|
|
AH->ReadBytePtr = _ReadByte;
|
|
|
|
AH->WriteBufPtr = _WriteBuf;
|
|
|
|
AH->ReadBufPtr = _ReadBuf;
|
|
|
|
AH->ClosePtr = _CloseArchive;
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->ReopenPtr = _ReopenArchive;
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->PrintTocDataPtr = _PrintTocData;
|
|
|
|
AH->ReadExtraTocPtr = _ReadExtraToc;
|
|
|
|
AH->WriteExtraTocPtr = _WriteExtraToc;
|
|
|
|
AH->PrintExtraTocPtr = _PrintExtraToc;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
AH->StartLOsPtr = _StartLOs;
|
|
|
|
AH->StartLOPtr = _StartLO;
|
|
|
|
AH->EndLOPtr = _EndLO;
|
|
|
|
AH->EndLOsPtr = _EndLOs;
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
|
|
|
|
AH->PrepParallelRestorePtr = _PrepParallelRestore;
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->ClonePtr = _Clone;
|
|
|
|
AH->DeClonePtr = _DeClone;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/* no parallel dump in the custom archive, only parallel restore */
|
|
|
|
AH->WorkerJobDumpPtr = NULL;
|
|
|
|
AH->WorkerJobRestorePtr = _WorkerJobRestoreCustom;
|
|
|
|
|
2010-12-02 20:39:03 +01:00
|
|
|
/* Set up a private area. */
|
2012-10-02 21:35:10 +02:00
|
|
|
ctx = (lclContext *) pg_malloc0(sizeof(lclContext));
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->formatData = (void *) ctx;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*
|
|
|
|
* Now open the file
|
|
|
|
*/
|
|
|
|
if (AH->mode == archModeWrite)
|
|
|
|
{
|
|
|
|
if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->FH = fopen(AH->fSpec, PG_BINARY_W);
|
2007-10-28 22:55:52 +01:00
|
|
|
if (!AH->FH)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open output file \"%s\": %m", AH->fSpec);
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->FH = stdout;
|
2007-10-28 22:55:52 +01:00
|
|
|
if (!AH->FH)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open output file: %m");
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2002-10-25 03:33:17 +02:00
|
|
|
ctx->hasSeek = checkSeek(AH->FH);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
|
2007-10-28 22:55:52 +01:00
|
|
|
if (!AH->FH)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:40:08 +02:00
|
|
|
AH->FH = stdin;
|
2007-10-28 22:55:52 +01:00
|
|
|
if (!AH->FH)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open input file: %m");
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2002-10-25 03:33:17 +02:00
|
|
|
ctx->hasSeek = checkSeek(AH->FH);
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
ReadHead(AH);
|
|
|
|
ReadToc(AH);
|
|
|
|
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
/*
|
|
|
|
* Remember location of first data block (i.e., the point after TOC)
|
|
|
|
* in case we have to search for desired data blocks.
|
|
|
|
*/
|
|
|
|
ctx->lastFilePos = _getFilePos(AH, ctx);
|
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver when the dumper creates a new TOC entry.
|
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*
|
2017-02-06 10:33:58 +01:00
|
|
|
 * Set up extra format-related TOC data.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx;
|
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
2000-07-21 13:40:08 +02:00
|
|
|
if (te->dataDumper)
|
2002-10-22 21:15:23 +02:00
|
|
|
ctx->dataState = K_OFFSET_POS_NOT_SET;
|
2000-07-21 13:40:08 +02:00
|
|
|
else
|
2002-10-22 21:15:23 +02:00
|
|
|
ctx->dataState = K_OFFSET_NO_DATA;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
te->formatData = (void *) ctx;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver to save any extra format-related TOC entry
|
|
|
|
* data.
|
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*
|
|
|
|
* Use the Archiver routines to write data - they are non-endian, and
|
|
|
|
* maintain other important file information.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
WriteOffset(AH, ctx->dataPos, ctx->dataState);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver to read any extra format-related TOC data.
|
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*
|
2012-04-24 04:43:09 +02:00
|
|
|
* Needs to match the order defined in _WriteExtraToc, and should also
|
2000-07-21 13:40:08 +02:00
|
|
|
* use the Archiver input routines.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
if (ctx == NULL)
|
|
|
|
{
|
2012-10-02 21:35:10 +02:00
|
|
|
ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
2000-07-21 13:40:08 +02:00
|
|
|
te->formatData = (void *) ctx;
|
|
|
|
}
|
|
|
|
|
2002-10-22 21:15:23 +02:00
|
|
|
ctx->dataState = ReadOffset(AH, &(ctx->dataPos));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Prior to V1.7 (pg7.3), we dumped the data size as an int now we don't
|
|
|
|
* dump it at all.
|
|
|
|
*/
|
|
|
|
if (AH->version < K_VERS_1_7)
|
2011-04-11 21:28:45 +02:00
|
|
|
ReadInt(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver when restoring an archive to output a comment
|
|
|
|
* that includes useful information about the TOC entry.
|
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
2004-03-03 22:28:55 +01:00
|
|
|
if (AH->public.verbose)
|
|
|
|
ahprintf(AH, "-- Data Pos: " INT64_FORMAT "\n",
|
|
|
|
(int64) ctx->dataPos);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when saving TABLE DATA (not schema). This routine
|
|
|
|
* should save whatever format-specific information is needed to read
|
|
|
|
* the archive back.
|
|
|
|
*
|
|
|
|
* It is called just prior to the dumper's 'DataDumper' routine being called.
|
|
|
|
*
|
|
|
|
* Optional, but strongly recommended.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_StartData(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
tctx->dataPos = _getFilePos(AH, ctx);
|
2020-07-17 18:14:28 +02:00
|
|
|
if (tctx->dataPos >= 0)
|
|
|
|
tctx->dataState = K_OFFSET_POS_SET;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
_WriteByte(AH, BLK_DATA); /* Block type */
|
2003-12-06 04:00:16 +01:00
|
|
|
WriteInt(AH, te->dumpId); /* For sanity check */
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
ctx->cs = AllocateCompressor(AH->compression_spec,
|
|
|
|
NULL,
|
|
|
|
_CustomWriteFunc);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by archiver when dumper calls WriteData. This routine is
|
2022-12-05 08:52:11 +01:00
|
|
|
* called for both LO and table data; it is the responsibility of
|
|
|
|
* the format to manage each kind of data using StartLO/StartData.
|
2000-07-21 13:40:08 +02:00
|
|
|
*
|
2005-06-21 22:45:44 +02:00
|
|
|
* It should only be called from within a DataDumper routine.
|
2000-07-21 13:40:08 +02:00
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2002-08-20 19:54:45 +02:00
|
|
|
_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2010-12-02 20:39:03 +01:00
|
|
|
CompressorState *cs = ctx->cs;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
if (dLen > 0)
|
2023-02-23 18:33:30 +01:00
|
|
|
/* writeData() internally throws write errors */
|
|
|
|
cs->writeData(AH, cs, data, dLen);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when a dumper's 'DataDumper' routine has
|
|
|
|
* finished.
|
|
|
|
*
|
2023-02-23 18:33:30 +01:00
|
|
|
* Mandatory.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
2010-12-02 20:39:03 +01:00
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2010-12-02 20:39:03 +01:00
|
|
|
EndCompressor(AH, ctx->cs);
|
2023-02-23 18:33:30 +01:00
|
|
|
ctx->cs = NULL;
|
|
|
|
|
2010-12-02 20:39:03 +01:00
|
|
|
/* Send the end marker */
|
|
|
|
WriteInt(AH, 0);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Rearrange pg_dump's handling of large objects for better efficiency.
Commit c0d5be5d6 caused pg_dump to create a separate BLOB metadata TOC
entry for each large object (blob), but it did not touch the ancient
decision to put all the blobs' data into a single "BLOBS" TOC entry.
This is bad for a few reasons: for databases with millions of blobs,
the TOC becomes unreasonably large, causing performance issues;
selective restore of just some blobs is quite impossible; and we
cannot parallelize either dump or restore of the blob data, since our
architecture for that relies on farming out whole TOC entries to
worker processes.
To improve matters, let's group multiple blobs into each blob metadata
TOC entry, and then make corresponding per-group blob data TOC entries.
Selective restore using pg_restore's -l/-L switches is then possible,
though only at the group level. (Perhaps we should provide a switch
to allow forcing one-blob-per-group for users who need precise
selective restore and don't have huge numbers of blobs. This patch
doesn't do that, instead just hard-wiring the maximum number of blobs
per entry at 1000.)
The blobs in a group must all have the same owner, since the TOC entry
format only allows one owner to be named. In this implementation
we also require them to all share the same ACL (grants); the archive
format wouldn't require that, but pg_dump's representation of
DumpableObjects does. It seems unlikely that either restriction
will be problematic for databases with huge numbers of blobs.
The metadata TOC entries now have a "desc" string of "BLOB METADATA",
and their "defn" string is just a newline-separated list of blob OIDs.
The restore code has to generate creation commands, ALTER OWNER
commands, and drop commands (for --clean mode) from that. We would
need special-case code for ALTER OWNER and drop in any case, so the
alternative of keeping the "defn" as directly executable SQL code
for creation wouldn't buy much, and it seems like it'd bloat the
archive to little purpose.
Since we require the blobs of a metadata group to share the same ACL,
we can furthermore store only one copy of that ACL, and then make
pg_restore regenerate the appropriate commands for each blob. This
saves space in the dump file not only by removing duplicative SQL
command strings, but by not needing a separate TOC entry for each
blob's ACL. In turn, that reduces client-side memory requirements for
handling many blobs.
ACL TOC entries that need this special processing are labeled as
"ACL"/"LARGE OBJECTS nnn..nnn". If we have a blob with a unique ACL,
continue to label it as "ACL"/"LARGE OBJECT nnn". We don't actually
have to make such a distinction, but it saves a few cycles during
restore for the easy case, and it seems like a good idea to not change
the TOC contents unnecessarily.
The data TOC entries ("BLOBS") are exactly the same as before,
except that now there can be more than one, so we'd better give them
identifying tag strings.
Also, commit c0d5be5d6 put the new BLOB metadata TOC entries into
SECTION_PRE_DATA, which perhaps is defensible in some ways, but
it's a rather odd choice considering that we go out of our way to
treat blobs as data. Moreover, because parallel restore handles
the PRE_DATA section serially, this means we'd only get part of the
parallelism speedup we could hope for. Move these entries into
SECTION_DATA, letting us parallelize the lo_create calls not just the
data loading when there are many blobs. Add dependencies to ensure
that we won't try to load data for a blob we've not yet created.
As this stands, we still generate a separate TOC entry for any comment
or security label attached to a blob. I feel comfortable in believing
that comments and security labels on blobs are rare, so this patch
should be enough to get most of the useful TOC compression for blobs.
We have to bump the archive file format version number, since existing
versions of pg_restore wouldn't know they need to do something special
for BLOB METADATA, plus they aren't going to work correctly with
multiple BLOBS entries or multiple-large-object ACL entries.
The directory and tar-file format handlers need some work
for multiple BLOBS entries: they used to hard-wire the file name
as "blobs.toc", which is replaced here with "blobs_<dumpid>.toc".
The 002_pg_dump.pl test script also knows about that and requires
minor updates. (I had to drop the test for manually-compressed
blobs.toc files with LZ4, because lz4's obtuse command line
design requires explicit specification of the output file name
which seems impractical here. I don't think we're losing any
useful test coverage thereby; that test stanza seems completely
duplicative with the gzip and zstd cases anyway.)
In passing, centralize management of the lo_buf used to hold data
while restoring blobs. The code previously had each format handler
create lo_buf, which seems rather pointless given that the format
handlers all make it the same way. Moreover, the format handlers
never use lo_buf directly, making this setup a failure from a
separation-of-concerns standpoint. Let's move the responsibility into
pg_backup_archiver.c, which is the only module concerned with lo_buf.
The reason to do this in this patch is that it allows a centralized
fix for the now-false assumption that we never restore blobs in
parallel. Also, get rid of dead code in DropLOIfExists: it's been a
long time since we had any need to be able to restore to a pre-9.0
server.
Discussion: https://postgr.es/m/a9f9376f1c3343a6bb319dce294e20ac@EX13D05UWC001.ant.amazon.com
2024-04-01 22:25:56 +02:00
|
|
|
* Called by the archiver when starting to save BLOB DATA (not schema).
|
2000-07-21 13:40:08 +02:00
|
|
|
* This routine should save whatever format-specific information is needed
|
2022-12-05 08:52:11 +01:00
|
|
|
* to read the LOs back into memory.
|
2000-07-21 13:40:08 +02:00
|
|
|
*
|
|
|
|
* It is called just prior to the dumper's DataDumper routine.
|
|
|
|
*
|
|
|
|
* Optional, but strongly recommended.
|
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_StartLOs(ArchiveHandle *AH, TocEntry *te)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
tctx->dataPos = _getFilePos(AH, ctx);
|
2020-07-17 18:14:28 +02:00
|
|
|
if (tctx->dataPos >= 0)
|
|
|
|
tctx->dataState = K_OFFSET_POS_SET;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
_WriteByte(AH, BLK_BLOBS); /* Block type */
|
2003-12-06 04:00:16 +01:00
|
|
|
WriteInt(AH, te->dumpId); /* For sanity check */
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Called by the archiver when the dumper calls StartLO.
|
2000-07-21 13:40:08 +02:00
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*
|
|
|
|
* Must save the passed OID for retrieval at restore-time.
|
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2010-12-02 20:39:03 +01:00
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
if (oid == 0)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("invalid OID for large object");
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
WriteInt(AH, oid);
|
2010-12-02 20:39:03 +01:00
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
ctx->cs = AllocateCompressor(AH->compression_spec,
|
|
|
|
NULL,
|
|
|
|
_CustomWriteFunc);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Called by the archiver when the dumper calls EndLO.
|
2000-07-21 13:40:08 +02:00
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2010-12-02 20:39:03 +01:00
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
|
|
|
|
EndCompressor(AH, ctx->cs);
|
|
|
|
/* Send the end marker */
|
|
|
|
WriteInt(AH, 0);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Called by the archiver when finishing saving BLOB DATA.
 *
 * Optional.
 */
static void
_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
	/*
	 * Write out a fake zero OID to mark end-of-LOs; this is the terminator
	 * that _LoadLOs/_skipLOs stop on at restore time.
	 */
	WriteInt(AH, 0);
}
|
|
|
|
|
|
|
|
/*
 * Print data for a given TOC entry
 *
 * Locates this entry's data block in the archive -- by direct seek when a
 * file offset is recorded and the input is seekable, otherwise by scanning
 * forward over other blocks -- then restores it via _PrintData or _LoadLOs
 * depending on the block type.
 */
static void
_PrintTocData(ArchiveHandle *AH, TocEntry *te)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
	int			blkType;
	int			id;

	/* Nothing to do if this entry carries no data block at all */
	if (tctx->dataState == K_OFFSET_NO_DATA)
		return;

	if (!ctx->hasSeek || tctx->dataState == K_OFFSET_POS_NOT_SET)
	{
		/*
		 * We cannot seek directly to the desired block.  Instead, skip over
		 * block headers until we find the one we want.  Remember the
		 * positions of skipped-over blocks, so that if we later decide we
		 * need to read one, we'll be able to seek to it.
		 *
		 * When our input file is seekable, we can do the search starting from
		 * the point after the last data block we scanned in previous
		 * iterations of this function.
		 */
		if (ctx->hasSeek)
		{
			if (fseeko(AH->FH, ctx->lastFilePos, SEEK_SET) != 0)
				pg_fatal("error during file seek: %m");
		}

		for (;;)
		{
			/* Position of the block we're about to read; may be negative
			 * when the current offset cannot be determined */
			pgoff_t		thisBlkPos = _getFilePos(AH, ctx);

			_readBlockHeader(AH, &blkType, &id);

			if (blkType == EOF || id == te->dumpId)
				break;

			/* Remember the block position, if we got one */
			if (thisBlkPos >= 0)
			{
				TocEntry   *otherte = getTocEntryByDumpId(AH, id);

				if (otherte && otherte->formatData)
				{
					lclTocEntry *othertctx = (lclTocEntry *) otherte->formatData;

					/*
					 * Note: on Windows, multiple threads might access/update
					 * the same lclTocEntry concurrently, but that should be
					 * safe as long as we update dataPos before dataState.
					 * Ideally, we'd use pg_write_barrier() to enforce that,
					 * but the needed infrastructure doesn't exist in frontend
					 * code.  But Windows only runs on machines with strong
					 * store ordering, so it should be okay for now.
					 */
					if (othertctx->dataState == K_OFFSET_POS_NOT_SET)
					{
						othertctx->dataPos = thisBlkPos;
						othertctx->dataState = K_OFFSET_POS_SET;
					}
					else if (othertctx->dataPos != thisBlkPos ||
							 othertctx->dataState != K_OFFSET_POS_SET)
					{
						/* sanity check */
						pg_log_warning("data block %d has wrong seek position",
									   id);
					}
				}
			}

			/* Not the block we want: skip its contents and keep scanning */
			switch (blkType)
			{
				case BLK_DATA:
					_skipData(AH);
					break;

				case BLK_BLOBS:
					_skipLOs(AH);
					break;

				default:		/* Always have a default */
					pg_fatal("unrecognized data block type (%d) while searching archive",
							 blkType);
					break;
			}
		}
	}
	else
	{
		/* We can just seek to the place we need to be. */
		if (fseeko(AH->FH, tctx->dataPos, SEEK_SET) != 0)
			pg_fatal("error during file seek: %m");

		_readBlockHeader(AH, &blkType, &id);
	}

	/*
	 * If we reached EOF without finding the block we want, then either it
	 * doesn't exist, or it does but we lack the ability to seek back to it.
	 */
	if (blkType == EOF)
	{
		if (!ctx->hasSeek)
			pg_fatal("could not find block ID %d in archive -- "
					 "possibly due to out-of-order restore request, "
					 "which cannot be handled due to non-seekable input file",
					 te->dumpId);
		else
			pg_fatal("could not find block ID %d in archive -- "
					 "possibly corrupt archive",
					 te->dumpId);
	}

	/* Are we sane? */
	if (id != te->dumpId)
		pg_fatal("found unexpected block ID (%d) when reading data -- expected %d",
				 id, te->dumpId);

	/* Restore the block's contents according to its type */
	switch (blkType)
	{
		case BLK_DATA:
			_PrintData(AH);
			break;

		case BLK_BLOBS:
			_LoadLOs(AH, AH->public.ropt->dropSchema);
			break;

		default:				/* Always have a default */
			pg_fatal("unrecognized data block type %d while restoring archive",
					 blkType);
			break;
	}

	/*
	 * If our input file is seekable but lacks data offsets, update our
	 * knowledge of where to start future searches from.  (Note that we did
	 * not update the current TE's dataState/dataPos.  We could have, but
	 * there is no point since it will not be visited again.)
	 */
	if (ctx->hasSeek && tctx->dataState == K_OFFSET_POS_NOT_SET)
	{
		pgoff_t		curPos = _getFilePos(AH, ctx);

		if (curPos > ctx->lastFilePos)
			ctx->lastFilePos = curPos;
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print data from current file position.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_PrintData(ArchiveHandle *AH)
|
|
|
|
{
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressorState *cs;
|
|
|
|
|
|
|
|
cs = AllocateCompressor(AH->compression_spec,
|
|
|
|
_CustomReadFunc, NULL);
|
|
|
|
cs->readData(AH, cs);
|
|
|
|
EndCompressor(AH, cs);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_LoadLOs(ArchiveHandle *AH, bool drop)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2002-05-29 03:38:56 +02:00
|
|
|
Oid oid;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
StartRestoreLOs(AH);
|
2000-10-31 15:20:30 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
oid = ReadInt(AH);
|
|
|
|
while (oid != 0)
|
|
|
|
{
|
2022-12-05 08:52:11 +01:00
|
|
|
StartRestoreLO(AH, oid, drop);
|
2000-07-21 13:40:08 +02:00
|
|
|
_PrintData(AH);
|
2022-12-05 08:52:11 +01:00
|
|
|
EndRestoreLO(AH, oid);
|
2000-07-21 13:40:08 +02:00
|
|
|
oid = ReadInt(AH);
|
|
|
|
}
|
2000-10-31 15:20:30 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
EndRestoreLOs(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Skip the LOs from the current file position.
|
|
|
|
* LOs are written sequentially as data blocks (see below).
|
|
|
|
* Each LO is preceded by its original OID.
|
|
|
|
* A zero OID indicates the end of the LOs.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_skipLOs(ArchiveHandle *AH)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2002-05-29 03:38:56 +02:00
|
|
|
Oid oid;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
oid = ReadInt(AH);
|
|
|
|
while (oid != 0)
|
|
|
|
{
|
2000-10-31 15:20:30 +01:00
|
|
|
_skipData(AH);
|
|
|
|
oid = ReadInt(AH);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Skip data from current file position.
|
|
|
|
* Data blocks are formatted as an integer length, followed by data.
|
2020-06-12 14:05:10 +02:00
|
|
|
* A zero length indicates the end of the block.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_skipData(ArchiveHandle *AH)
|
|
|
|
{
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t blkLen;
|
2010-12-02 20:39:03 +01:00
|
|
|
char *buf = NULL;
|
|
|
|
int buflen = 0;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
blkLen = ReadInt(AH);
|
|
|
|
while (blkLen != 0)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
if (ctx->hasSeek)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
if (fseeko(AH->FH, blkLen, SEEK_CUR) != 0)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("error during file seek: %m");
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
else
|
2006-05-22 13:21:54 +02:00
|
|
|
{
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
if (blkLen > buflen)
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(buf);
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
buf = (char *) pg_malloc(blkLen);
|
|
|
|
buflen = blkLen;
|
|
|
|
}
|
2020-09-05 19:17:32 +02:00
|
|
|
if (fread(buf, 1, blkLen, AH->FH) != blkLen)
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
{
|
|
|
|
if (feof(AH->FH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not read from input file: end of file");
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
else
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not read from input file: %m");
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
}
|
2006-05-22 13:21:54 +02:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
blkLen = ReadInt(AH);
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2010-12-02 20:39:03 +01:00
|
|
|
|
2022-06-16 21:50:56 +02:00
|
|
|
free(buf);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write a byte of data to the archive.
|
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*
|
|
|
|
* Called by the archiver to do integer & byte output to the archive.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
_WriteByte(ArchiveHandle *AH, const int i)
|
|
|
|
{
|
2020-09-05 19:17:32 +02:00
|
|
|
if (fputc(i, AH->FH) == EOF)
|
2014-05-06 02:27:16 +02:00
|
|
|
WRITE_ERROR_EXIT;
|
|
|
|
|
|
|
|
return 1;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a byte of data from the archive.
|
|
|
|
*
|
|
|
|
* Mandatory
|
|
|
|
*
|
|
|
|
* Called by the archiver to read bytes & integers from the archive.
|
2007-08-06 03:38:15 +02:00
|
|
|
* EOF should be treated as a fatal error.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
_ReadByte(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
int res;
|
|
|
|
|
2007-08-06 03:38:15 +02:00
|
|
|
res = getc(AH->FH);
|
|
|
|
if (res == EOF)
|
2014-05-06 02:27:16 +02:00
|
|
|
READ_ERROR_EXIT(AH->FH);
|
2000-07-21 13:40:08 +02:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write a buffer of data to the archive.
|
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*
|
|
|
|
* Called by the archiver to write a block of bytes to the archive.
|
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2002-08-20 19:54:45 +02:00
|
|
|
_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2014-05-06 02:27:16 +02:00
|
|
|
if (fwrite(buf, 1, len, AH->FH) != len)
|
|
|
|
WRITE_ERROR_EXIT;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a block of bytes from the archive.
|
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*
|
|
|
|
* Called by the archiver to read a block of bytes from the archive
|
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2002-08-20 19:54:45 +02:00
|
|
|
_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2014-05-06 02:27:16 +02:00
|
|
|
if (fread(buf, 1, len, AH->FH) != len)
|
|
|
|
READ_ERROR_EXIT(AH->FH);
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Close the archive.
|
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*
|
|
|
|
* When writing the archive, this is the routine that actually starts
|
|
|
|
* the process of saving it to files. No data should be written prior
|
|
|
|
* to this point, since the user could sort the TOC after creating it.
|
|
|
|
*
|
2014-10-14 20:00:55 +02:00
|
|
|
* If an archive is to be written, this routine must call:
|
2000-07-21 13:40:08 +02:00
|
|
|
* WriteHead to save the archive header
|
|
|
|
* WriteToc to save the TOC entries
|
2022-12-05 08:52:11 +01:00
|
|
|
* WriteDataChunks to save all data & LOs.
|
2000-07-21 13:40:08 +02:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
_CloseArchive(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	pgoff_t		tpos;

	if (AH->mode == archModeWrite)
	{
		WriteHead(AH);
		/* Remember TOC's seek position for use below */
		tpos = ftello(AH->FH);
		/* A failed ftello is only fatal if we believed the file seekable */
		if (tpos < 0 && ctx->hasSeek)
			pg_fatal("could not determine seek position in archive file: %m");
		/* First TOC pass: data offsets are not yet known at this point */
		WriteToc(AH);
		WriteDataChunks(AH, NULL);

		/*
		 * If possible, re-write the TOC in order to update the data offset
		 * information.  This is not essential, as pg_restore can cope in most
		 * cases without it; but it can make pg_restore significantly faster
		 * in some situations (especially parallel restore).
		 */
		if (ctx->hasSeek &&
			fseeko(AH->FH, tpos, SEEK_SET) == 0)
			WriteToc(AH);
	}

	if (fclose(AH->FH) != 0)
		pg_fatal("could not close archive file: %m");

	/* Sync the output file if one is defined */
	if (AH->dosync && AH->mode == archModeWrite && AH->fSpec)
		(void) fsync_fname(AH->fSpec, false);

	/* Mark the handle closed so later code can't reuse a stale FILE* */
	AH->FH = NULL;
}
|
|
|
|
|
2009-02-02 21:07:37 +01:00
|
|
|
/*
|
|
|
|
* Reopen the archive's file handle.
|
|
|
|
*
|
|
|
|
* We close the original file handle, except on Windows. (The difference
|
|
|
|
* is because on Windows, this is used within a multithreading context,
|
|
|
|
* and we don't want a thread closing the parent file handle.)
|
|
|
|
*/
|
|
|
|
static void
_ReopenArchive(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	pgoff_t		tpos;

	if (AH->mode == archModeWrite)
		pg_fatal("can only reopen input archives");

	/*
	 * These two cases are user-facing errors since they represent unsupported
	 * (but not invalid) use-cases.  Word the error messages appropriately.
	 */
	if (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0)
		pg_fatal("parallel restore from standard input is not supported");
	if (!ctx->hasSeek)
		pg_fatal("parallel restore from non-seekable file is not supported");

	/* Remember where we were, so the reopened handle can resume there */
	tpos = ftello(AH->FH);
	if (tpos < 0)
		pg_fatal("could not determine seek position in archive file: %m");

#ifndef WIN32
	/* On Windows the parent's handle must stay open (threaded workers) */
	if (fclose(AH->FH) != 0)
		pg_fatal("could not close archive file: %m");
#endif

	AH->FH = fopen(AH->fSpec, PG_BINARY_R);
	if (!AH->FH)
		pg_fatal("could not open input file \"%s\": %m", AH->fSpec);

	if (fseeko(AH->FH, tpos, SEEK_SET) != 0)
		pg_fatal("could not set seek position in archive file: %m");
}
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/*
|
|
|
|
* Prepare for parallel restore.
|
|
|
|
*
|
|
|
|
* The main thing that needs to happen here is to fill in TABLE DATA and BLOBS
|
|
|
|
* TOC entries' dataLength fields with appropriate values to guide the
|
|
|
|
* ordering of restore jobs. The source of said data is format-dependent,
|
|
|
|
* as is the exact meaning of the values.
|
|
|
|
*
|
|
|
|
* A format module might also choose to do other setup here.
|
|
|
|
*/
|
|
|
|
static void
_PrepParallelRestore(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	TocEntry   *prev_te = NULL;
	lclTocEntry *prev_tctx = NULL;
	TocEntry   *te;

	/*
	 * Knowing that the data items were dumped out in TOC order, we can
	 * reconstruct the length of each item as the delta to the start offset of
	 * the next data item.
	 */
	/* AH->toc acts as the sentinel node of a circular list */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		lclTocEntry *tctx = (lclTocEntry *) te->formatData;

		/*
		 * Ignore entries without a known data offset; if we were unable to
		 * seek to rewrite the TOC when creating the archive, this'll be all
		 * of them, and we'll end up with no size estimates.
		 */
		if (tctx->dataState != K_OFFSET_POS_SET)
			continue;

		/* Compute previous data item's length */
		if (prev_te)
		{
			/* The > guard avoids assigning a nonsensical negative length */
			if (tctx->dataPos > prev_tctx->dataPos)
				prev_te->dataLength = tctx->dataPos - prev_tctx->dataPos;
		}

		prev_te = te;
		prev_tctx = tctx;
	}

	/* If OK to seek, we can determine the length of the last item */
	if (prev_te && ctx->hasSeek)
	{
		pgoff_t		endpos;

		if (fseeko(AH->FH, 0, SEEK_END) != 0)
			pg_fatal("error during file seek: %m");
		endpos = ftello(AH->FH);
		if (endpos > prev_tctx->dataPos)
			prev_te->dataLength = endpos - prev_tctx->dataPos;
	}
}
|
|
|
|
|
2010-12-03 13:58:24 +01:00
|
|
|
/*
|
|
|
|
* Clone format-specific fields during parallel restoration.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_Clone(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
/*
|
|
|
|
* Each thread must have private lclContext working state.
|
|
|
|
*/
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->formatData = (lclContext *) pg_malloc(sizeof(lclContext));
|
2010-12-03 13:58:24 +01:00
|
|
|
memcpy(AH->formatData, ctx, sizeof(lclContext));
|
|
|
|
ctx = (lclContext *) AH->formatData;
|
|
|
|
|
|
|
|
/* sanity check, shouldn't happen */
|
|
|
|
if (ctx->cs != NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("compressor active");
|
2010-12-03 13:58:24 +01:00
|
|
|
|
|
|
|
/*
|
Cope with data-offset-less archive files during out-of-order restores.
pg_dump produces custom-format archive files that lack data offsets
when it is unable to seek its output. Up to now that's been a hazard
for pg_restore. But if pg_restore is able to seek in the archive
file, there is no reason to throw up our hands when asked to restore
data blocks out of order. Instead, whenever we are searching for a
data block, record the locations of the blocks we passed over (that
is, fill in the missing data-offset fields in our in-memory copy of
the TOC data). Then, when we hit a case that requires going
backwards, we can just seek back.
Also track the furthest point that we've searched to, and seek back
to there when beginning a search for a new data block. This avoids
possible O(N^2) time consumption, by ensuring that each data block
is examined at most twice. (On Unix systems, that's at most twice
per parallel-restore job; but since Windows uses threads here, the
threads can share block location knowledge, reducing the amount of
duplicated work.)
We can also improve the code a bit by using fseeko() to skip over
data blocks during the search.
This is all of some use even in simple restores, but it's really
significant for parallel pg_restore. In that case, we require
seekability of the input already, and we will very probably need
to do out-of-order restores.
Back-patch to v12, as this fixes a regression introduced by commit
548e50976. Before that, parallel restore avoided requesting
out-of-order restores, so it would work on a data-offset-less
archive. Now it will again.
Ideally this patch would include some test coverage, but there are
other open bugs that need to be fixed before we can extend our
coverage of parallel restore very much. Plan to revisit that later.
David Gilman and Tom Lane; reviewed by Justin Pryzby
Discussion: https://postgr.es/m/CALBH9DDuJ+scZc4MEvw5uO-=vRyR2=QF9+Yh=3hPEnKHWfS81A@mail.gmail.com
2020-07-17 19:03:50 +02:00
|
|
|
* We intentionally do not clone TOC-entry-local state: it's useful to
|
|
|
|
* share knowledge about where the data blocks are across threads.
|
|
|
|
* _PrintTocData has to be careful about the order of operations on that
|
|
|
|
* state, though.
|
2010-12-03 13:58:24 +01:00
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_DeClone(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2011-04-10 17:42:00 +02:00
|
|
|
|
2010-12-03 13:58:24 +01:00
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
 * This function is executed in the child of a parallel restore from a
 * custom-format archive and restores the actual data for one TOC entry.
 *
 * It is a thin wrapper: all of the real work is done by the shared
 * parallel_restore() routine, whose integer status result is passed
 * straight back to the parallel-worker machinery.
 */
static int
_WorkerJobRestoreCustom(ArchiveHandle *AH, TocEntry *te)
{
	return parallel_restore(AH, te);
}
|
|
|
|
|
2000-07-21 13:40:08 +02:00
|
|
|
/*--------------------------------------------------
|
|
|
|
* END OF FORMAT CALLBACKS
|
|
|
|
*--------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the current position in the archive file.
|
2020-07-17 18:14:28 +02:00
|
|
|
*
|
|
|
|
* With a non-seekable archive file, we may not be able to obtain the
|
|
|
|
* file position. If so, just return -1. It's not too important in
|
|
|
|
* that case because we won't be able to rewrite the TOC to fill in
|
|
|
|
* data block offsets anyway.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
2007-02-19 16:05:06 +01:00
|
|
|
static pgoff_t
|
2000-07-21 13:40:08 +02:00
|
|
|
_getFilePos(ArchiveHandle *AH, lclContext *ctx)
|
|
|
|
{
|
2007-02-19 16:05:06 +01:00
|
|
|
pgoff_t pos;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2020-07-17 18:14:28 +02:00
|
|
|
pos = ftello(AH->FH);
|
|
|
|
if (pos < 0)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2020-07-17 18:14:28 +02:00
|
|
|
/* Not expected if we found we can seek. */
|
|
|
|
if (ctx->hasSeek)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not determine seek position in archive file: %m");
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
return pos;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a data block header. The format changed in V1.3, so we
|
2010-06-27 21:07:24 +02:00
|
|
|
* centralize the code here for simplicity. Returns *type = EOF
|
|
|
|
* if at EOF.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_readBlockHeader(ArchiveHandle *AH, int *type, int *id)
|
|
|
|
{
|
2010-06-27 21:07:24 +02:00
|
|
|
int byt;
|
|
|
|
|
|
|
|
/*
|
2022-04-08 20:55:14 +02:00
|
|
|
* Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal()
|
2010-06-27 21:07:24 +02:00
|
|
|
* inside ReadInt rather than returning EOF. It doesn't seem worth
|
|
|
|
* jumping through hoops to deal with that case better, because no such
|
|
|
|
* files are likely to exist in the wild: only some 7.1 development
|
|
|
|
* versions of pg_dump ever generated such files.
|
|
|
|
*/
|
2000-07-21 13:40:08 +02:00
|
|
|
if (AH->version < K_VERS_1_3)
|
|
|
|
*type = BLK_DATA;
|
|
|
|
else
|
2010-06-27 21:07:24 +02:00
|
|
|
{
|
|
|
|
byt = getc(AH->FH);
|
|
|
|
*type = byt;
|
|
|
|
if (byt == EOF)
|
|
|
|
{
|
|
|
|
*id = 0; /* don't return an uninitialized value */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
|
|
|
|
*id = ReadInt(AH);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2023-02-23 18:33:30 +01:00
|
|
|
* Callback function for writeData. Writes one block of (compressed)
|
2010-12-02 20:39:03 +01:00
|
|
|
* data to the archive.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2010-12-02 20:39:03 +01:00
|
|
|
_CustomWriteFunc(ArchiveHandle *AH, const char *buf, size_t len)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2010-12-02 20:39:03 +01:00
|
|
|
/* never write 0-byte blocks (this should not happen) */
|
2014-05-06 02:27:16 +02:00
|
|
|
if (len > 0)
|
|
|
|
{
|
|
|
|
WriteInt(AH, len);
|
|
|
|
_WriteBuf(AH, buf, len);
|
|
|
|
}
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2023-02-23 18:33:30 +01:00
|
|
|
* Callback function for readData. To keep things simple, we
|
2010-12-02 20:39:03 +01:00
|
|
|
* always read one compressed block at a time.
|
2000-07-21 13:40:08 +02:00
|
|
|
*/
|
2010-12-02 20:39:03 +01:00
|
|
|
static size_t
|
|
|
|
_CustomReadFunc(ArchiveHandle *AH, char **buf, size_t *buflen)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2010-12-02 20:39:03 +01:00
|
|
|
size_t blkLen;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2010-12-02 20:39:03 +01:00
|
|
|
/* Read length */
|
|
|
|
blkLen = ReadInt(AH);
|
|
|
|
if (blkLen == 0)
|
|
|
|
return 0;
|
2000-07-21 13:40:08 +02:00
|
|
|
|
2010-12-02 20:39:03 +01:00
|
|
|
/* If the caller's buffer is not large enough, allocate a bigger one */
|
|
|
|
if (blkLen > *buflen)
|
2000-07-21 13:40:08 +02:00
|
|
|
{
|
2010-12-02 20:39:03 +01:00
|
|
|
free(*buf);
|
2011-11-25 21:40:51 +01:00
|
|
|
*buf = (char *) pg_malloc(blkLen);
|
2010-12-02 20:39:03 +01:00
|
|
|
*buflen = blkLen;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
/* exits app on read errors */
|
|
|
|
_ReadBuf(AH, *buf, blkLen);
|
|
|
|
|
|
|
|
return blkLen;
|
2000-07-21 13:40:08 +02:00
|
|
|
}
|