/*-------------------------------------------------------------------------
 *
 * pg_backup_directory.c
 *
 * A directory format dump is a directory, which contains a "toc.dat" file
 * for the TOC, and a separate file for each data entry, named "<oid>.dat".
 * Large objects are stored in separate files named "blob_<oid>.dat",
 * and there's a plain-text TOC file for them called "blobs.toc". If
 * compression is used, each data file is individually compressed and the
 * ".gz" suffix is added to the filenames. The TOC files are never
 * compressed by pg_dump, however they are accepted with the .gz suffix too,
 * in case the user has manually compressed them with 'gzip'.
 *
 * NOTE: This format is identical to the files written in the tar file in
 * the 'tar' format, except that we don't write the restore.sql file (TODO),
 * and the tar format doesn't support compression. Please keep the formats in
 * sync.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 * Portions Copyright (c) 2000, Philip Warner
 *
 * Rights are granted to use this software in any way so long
 * as this notice is not removed.
 *
 * The author is not responsible for loss or damages that may
 * result from its use.
 *
 * IDENTIFICATION
 *	src/bin/pg_dump/pg_backup_directory.c
 *
 *-------------------------------------------------------------------------
 */
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "postgres_fe.h"
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2019-10-23 06:08:53 +02:00
|
|
|
#include <dirent.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
|
|
|
|
#include "common/file_utils.h"
|
2011-01-23 22:10:15 +01:00
|
|
|
#include "compress_io.h"
|
2013-03-24 16:27:20 +01:00
|
|
|
#include "parallel.h"
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "pg_backup_utils.h"
|
2011-01-23 22:44:07 +01:00
|
|
|
|
2011-01-23 22:10:15 +01:00
|
|
|
typedef struct
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Our archive location. This is basically what the user specified as his
|
|
|
|
* backup file but of course here it is a directory.
|
|
|
|
*/
|
|
|
|
char *directory;
|
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *dataFH; /* currently open data file */
|
|
|
|
CompressFileHandle *LOsTocFH; /* file handle for blobs.toc */
|
2013-03-24 16:27:20 +01:00
|
|
|
ParallelState *pstate; /* for parallel backup / restore */
|
2011-01-23 22:10:15 +01:00
|
|
|
} lclContext;
|
|
|
|
|
|
|
|
typedef struct
{
	/* basename of this entry's data file (no directory part), or NULL */
	char	   *filename;
} lclTocEntry;
|
|
|
|
|
|
|
|
/* prototypes for private functions */
|
|
|
|
static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _StartData(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
2011-01-23 22:10:15 +01:00
|
|
|
static int _WriteByte(ArchiveHandle *AH, const int i);
|
2022-09-23 01:41:23 +02:00
|
|
|
static int _ReadByte(ArchiveHandle *AH);
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
|
|
static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
|
2016-01-13 23:48:33 +01:00
|
|
|
static void _CloseArchive(ArchiveHandle *AH);
|
2013-03-24 16:27:20 +01:00
|
|
|
static void _ReopenArchive(ArchiveHandle *AH);
|
2016-01-13 23:48:33 +01:00
|
|
|
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
|
|
static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
|
|
static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _LoadLOs(ArchiveHandle *AH);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
static void _PrepParallelRestore(ArchiveHandle *AH);
|
2013-03-24 16:27:20 +01:00
|
|
|
static void _Clone(ArchiveHandle *AH);
|
|
|
|
static void _DeClone(ArchiveHandle *AH);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
static int _WorkerJobRestoreDirectory(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static int _WorkerJobDumpDirectory(ArchiveHandle *AH, TocEntry *te);
|
2013-03-24 16:27:20 +01:00
|
|
|
|
|
|
|
static void setFilePath(ArchiveHandle *AH, char *buf,
|
|
|
|
const char *relativeFilename);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Init routine required by ALL formats. This is a global routine
|
|
|
|
* and should be declared in pg_backup_archiver.h
|
|
|
|
*
|
|
|
|
* Its task is to create any extra archive context (using AH->formatData),
|
|
|
|
* and to initialize the supported function pointers.
|
|
|
|
*
|
|
|
|
* It should also prepare whatever its input source is for reading/writing,
|
|
|
|
* and in the case of a read mode connection, it should load the Header & TOC.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
InitArchiveFmt_Directory(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx;
|
|
|
|
|
|
|
|
/* Assuming static functions, this can be copied for each format. */
|
|
|
|
AH->ArchiveEntryPtr = _ArchiveEntry;
|
|
|
|
AH->StartDataPtr = _StartData;
|
|
|
|
AH->WriteDataPtr = _WriteData;
|
|
|
|
AH->EndDataPtr = _EndData;
|
|
|
|
AH->WriteBytePtr = _WriteByte;
|
|
|
|
AH->ReadBytePtr = _ReadByte;
|
|
|
|
AH->WriteBufPtr = _WriteBuf;
|
|
|
|
AH->ReadBufPtr = _ReadBuf;
|
|
|
|
AH->ClosePtr = _CloseArchive;
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->ReopenPtr = _ReopenArchive;
|
2011-01-23 22:10:15 +01:00
|
|
|
AH->PrintTocDataPtr = _PrintTocData;
|
|
|
|
AH->ReadExtraTocPtr = _ReadExtraToc;
|
|
|
|
AH->WriteExtraTocPtr = _WriteExtraToc;
|
|
|
|
AH->PrintExtraTocPtr = _PrintExtraToc;
|
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
AH->StartLOsPtr = _StartLOs;
|
|
|
|
AH->StartLOPtr = _StartLO;
|
|
|
|
AH->EndLOPtr = _EndLO;
|
|
|
|
AH->EndLOsPtr = _EndLOs;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
AH->PrepParallelRestorePtr = _PrepParallelRestore;
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->ClonePtr = _Clone;
|
|
|
|
AH->DeClonePtr = _DeClone;
|
|
|
|
|
|
|
|
AH->WorkerJobRestorePtr = _WorkerJobRestoreDirectory;
|
|
|
|
AH->WorkerJobDumpPtr = _WorkerJobDumpDirectory;
|
|
|
|
|
2011-01-23 22:10:15 +01:00
|
|
|
/* Set up our private context */
|
2012-10-02 21:35:10 +02:00
|
|
|
ctx = (lclContext *) pg_malloc0(sizeof(lclContext));
|
2011-01-23 22:10:15 +01:00
|
|
|
AH->formatData = (void *) ctx;
|
|
|
|
|
|
|
|
ctx->dataFH = NULL;
|
2022-12-05 08:52:11 +01:00
|
|
|
ctx->LOsTocFH = NULL;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
/* Initialize LO buffering */
|
|
|
|
AH->lo_buf_size = LOBBUFSIZE;
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now open the TOC file
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!AH->fSpec || strcmp(AH->fSpec, "") == 0)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("no output directory specified");
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
ctx->directory = AH->fSpec;
|
|
|
|
|
|
|
|
if (AH->mode == archModeWrite)
|
|
|
|
{
|
2013-03-24 16:27:20 +01:00
|
|
|
struct stat st;
|
|
|
|
bool is_empty = false;
|
|
|
|
|
|
|
|
/* we accept an empty existing directory */
|
|
|
|
if (stat(ctx->directory, &st) == 0 && S_ISDIR(st.st_mode))
|
|
|
|
{
|
|
|
|
DIR *dir = opendir(ctx->directory);
|
|
|
|
|
|
|
|
if (dir)
|
|
|
|
{
|
|
|
|
struct dirent *d;
|
|
|
|
|
|
|
|
is_empty = true;
|
2014-03-21 18:45:11 +01:00
|
|
|
while (errno = 0, (d = readdir(dir)))
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
|
|
|
if (strcmp(d->d_name, ".") != 0 && strcmp(d->d_name, "..") != 0)
|
|
|
|
{
|
|
|
|
is_empty = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2014-03-21 18:45:11 +01:00
|
|
|
|
|
|
|
if (errno)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not read directory \"%s\": %m",
|
|
|
|
ctx->directory);
|
2014-03-21 18:45:11 +01:00
|
|
|
|
|
|
|
if (closedir(dir))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not close directory \"%s\": %m",
|
|
|
|
ctx->directory);
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_empty && mkdir(ctx->directory, 0700) < 0)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not create directory \"%s\": %m",
|
|
|
|
ctx->directory);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{ /* Read Mode */
|
2013-03-24 16:27:20 +01:00
|
|
|
char fname[MAXPGPATH];
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *tocFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
setFilePath(AH, fname, "toc.dat");
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
tocFH = InitDiscoverCompressFileHandle(fname, PG_BINARY_R);
|
2011-01-23 22:10:15 +01:00
|
|
|
if (tocFH == NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open input file \"%s\": %m", fname);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
ctx->dataFH = tocFH;
|
2011-04-10 17:42:00 +02:00
|
|
|
|
2011-01-23 22:10:15 +01:00
|
|
|
/*
|
|
|
|
* The TOC of a directory format dump shares the format code of the
|
|
|
|
* tar format.
|
|
|
|
*/
|
|
|
|
AH->format = archTar;
|
|
|
|
ReadHead(AH);
|
|
|
|
AH->format = archDirectory;
|
|
|
|
ReadToc(AH);
|
|
|
|
|
|
|
|
/* Nothing else in the file, so close it again... */
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!EndCompressFileHandle(tocFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not close TOC file: %m");
|
2011-01-23 22:10:15 +01:00
|
|
|
ctx->dataFH = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver when the dumper creates a new TOC entry.
|
|
|
|
*
|
|
|
|
* We determine the filename for this entry.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx;
|
|
|
|
char fn[MAXPGPATH];
|
|
|
|
|
2012-10-02 21:35:10 +02:00
|
|
|
tctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
if (strcmp(te->desc, "BLOBS") == 0)
|
|
|
|
tctx->filename = pg_strdup("blobs.toc");
|
|
|
|
else if (te->dataDumper)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
|
|
|
snprintf(fn, MAXPGPATH, "%d.dat", te->dumpId);
|
2011-11-25 21:40:51 +01:00
|
|
|
tctx->filename = pg_strdup(fn);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
tctx->filename = NULL;
|
|
|
|
|
|
|
|
te->formatData = (void *) tctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver to save any extra format-related TOC entry
|
|
|
|
* data.
|
|
|
|
*
|
|
|
|
* Use the Archiver routines to write data - they are non-endian, and
|
|
|
|
* maintain other important file information.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A dumpable object has set tctx->filename, any other object has not.
|
|
|
|
* (see _ArchiveEntry).
|
|
|
|
*/
|
|
|
|
if (tctx->filename)
|
|
|
|
WriteStr(AH, tctx->filename);
|
|
|
|
else
|
|
|
|
WriteStr(AH, "");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver to read any extra format-related TOC data.
|
|
|
|
*
|
|
|
|
* Needs to match the order defined in _WriteExtraToc, and should also
|
|
|
|
* use the Archiver input routines.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
if (tctx == NULL)
|
|
|
|
{
|
2012-10-02 21:35:10 +02:00
|
|
|
tctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
2011-01-23 22:10:15 +01:00
|
|
|
te->formatData = (void *) tctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
tctx->filename = ReadStr(AH);
|
|
|
|
if (strlen(tctx->filename) == 0)
|
|
|
|
{
|
|
|
|
free(tctx->filename);
|
|
|
|
tctx->filename = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the Archiver when restoring an archive to output a comment
|
|
|
|
* that includes useful information about the TOC entry.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
if (AH->public.verbose && tctx->filename)
|
|
|
|
ahprintf(AH, "-- File: %s\n", tctx->filename);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when saving TABLE DATA (not schema). This routine
|
|
|
|
* should save whatever format-specific information is needed to read
|
|
|
|
* the archive back.
|
|
|
|
*
|
|
|
|
* It is called just prior to the dumper's 'DataDumper' routine being called.
|
|
|
|
*
|
|
|
|
* We create the data file for writing.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_StartData(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2013-03-24 16:27:20 +01:00
|
|
|
char fname[MAXPGPATH];
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
setFilePath(AH, fname, tctx->filename);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
ctx->dataFH = InitCompressFileHandle(AH->compression_spec);
|
|
|
|
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!ctx->dataFH->open_write_func(fname, PG_BINARY_W, ctx->dataFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open output file \"%s\": %m", fname);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by archiver when dumper calls WriteData. This routine is
|
2022-12-05 08:52:11 +01:00
|
|
|
* called for both LO and table data; it is the responsibility of
|
|
|
|
* the format to manage each kind of data using StartLO/StartData.
|
2011-01-23 22:10:15 +01:00
|
|
|
*
|
|
|
|
* It should only be called from within a DataDumper routine.
|
|
|
|
*
|
|
|
|
* We write the data to the open data file.
|
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2011-01-23 22:10:15 +01:00
|
|
|
_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH = ctx->dataFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2020-06-19 22:46:07 +02:00
|
|
|
errno = 0;
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (dLen > 0 && !CFH->write_func(data, dLen, CFH))
|
2020-06-19 22:46:07 +02:00
|
|
|
{
|
|
|
|
/* if write didn't set errno, assume problem is no disk space */
|
|
|
|
if (errno == 0)
|
|
|
|
errno = ENOSPC;
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not write to output file: %s",
|
2023-02-23 18:33:30 +01:00
|
|
|
CFH->get_error_func(CFH));
|
2020-06-19 22:46:07 +02:00
|
|
|
}
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when a dumper's 'DataDumper' routine has
|
|
|
|
* finished.
|
|
|
|
*
|
|
|
|
* We close the data file.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
|
|
|
|
/* Close the file */
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!EndCompressFileHandle(ctx->dataFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not close data file: %m");
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
ctx->dataFH = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Print data for a given file (can be a LO as well)
|
2011-01-23 22:10:15 +01:00
|
|
|
*/
|
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
_PrintFileData(ArchiveHandle *AH, char *filename)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
size_t cnt = 0;
|
2011-01-23 22:10:15 +01:00
|
|
|
char *buf;
|
|
|
|
size_t buflen;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
if (!filename)
|
|
|
|
return;
|
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
CFH = InitDiscoverCompressFileHandle(filename, PG_BINARY_R);
|
|
|
|
if (!CFH)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open input file \"%s\": %m", filename);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2023-03-23 17:52:32 +01:00
|
|
|
buflen = DEFAULT_IO_BUFFER_SIZE;
|
|
|
|
buf = pg_malloc(buflen);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
while (CFH->read_func(buf, buflen, &cnt, CFH) && cnt > 0)
|
2016-05-29 19:18:48 +02:00
|
|
|
{
|
2011-01-23 22:10:15 +01:00
|
|
|
ahwrite(buf, 1, cnt, AH);
|
2016-05-29 19:18:48 +02:00
|
|
|
}
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
free(buf);
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!EndCompressFileHandle(CFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not close data file \"%s\": %m", filename);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print data for a given TOC entry
|
|
|
|
*/
|
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
_PrintTocData(ArchiveHandle *AH, TocEntry *te)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
if (!tctx->filename)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0)
|
2022-12-05 08:52:11 +01:00
|
|
|
_LoadLOs(AH);
|
2011-01-23 22:10:15 +01:00
|
|
|
else
|
|
|
|
{
|
2013-03-24 16:27:20 +01:00
|
|
|
char fname[MAXPGPATH];
|
2011-04-10 17:42:00 +02:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
setFilePath(AH, fname, tctx->filename);
|
2016-01-13 23:48:33 +01:00
|
|
|
_PrintFileData(AH, fname);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Restore all large objects listed in the "blobs.toc" file.
 *
 * The LOs TOC file contains one line per large object, of the form
 * "<oid> <filename>"; for each line we create/start the LO, stream the
 * named data file into it, and finish the LO.  Any parse, read, or close
 * failure is fatal.
 */
static void
_LoadLOs(ArchiveHandle *AH)
{
	Oid			oid;
	lclContext *ctx = (lclContext *) AH->formatData;
	CompressFileHandle *CFH;
	char		tocfname[MAXPGPATH];
	char		line[MAXPGPATH];

	StartRestoreLOs(AH);

	setFilePath(AH, tocfname, "blobs.toc");

	/* The TOC may have been compressed manually; autodetect the format. */
	CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R);

	if (ctx->LOsTocFH == NULL)
		pg_fatal("could not open large object TOC file \"%s\" for input: %m",
				 tocfname);

	/* Read the LOs TOC file line-by-line, and process each LO */
	while ((CFH->gets_func(line, MAXPGPATH, CFH)) != NULL)
	{
		char		lofname[MAXPGPATH + 1];
		char		path[MAXPGPATH];

		/* Can't overflow because line and lofname are the same length */
		if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, lofname) != 2)
			pg_fatal("invalid line in large object TOC file \"%s\": \"%s\"",
					 tocfname, line);

		/* Create the LO, stream its data file into it, then finish it. */
		StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);
		snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, lofname);
		_PrintFileData(AH, path);
		EndRestoreLO(AH, oid);
	}
	/* gets_func returning NULL before EOF means a read error occurred. */
	if (!CFH->eof_func(CFH))
		pg_fatal("error reading large object TOC file \"%s\"",
				 tocfname);

	if (!EndCompressFileHandle(ctx->LOsTocFH))
		pg_fatal("could not close large object TOC file \"%s\": %m",
				 tocfname);

	ctx->LOsTocFH = NULL;

	EndRestoreLOs(AH);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write a byte of data to the archive.
|
|
|
|
* Called by the archiver to do integer & byte output to the archive.
|
|
|
|
* These routines are only used to read & write the headers & TOC.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
_WriteByte(ArchiveHandle *AH, const int i)
|
|
|
|
{
|
|
|
|
unsigned char c = (unsigned char) i;
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH = ctx->dataFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2020-06-19 22:46:07 +02:00
|
|
|
errno = 0;
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!CFH->write_func(&c, 1, CFH))
|
2020-06-19 22:46:07 +02:00
|
|
|
{
|
|
|
|
/* if write didn't set errno, assume problem is no disk space */
|
|
|
|
if (errno == 0)
|
|
|
|
errno = ENOSPC;
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not write to output file: %s",
|
2023-02-23 18:33:30 +01:00
|
|
|
CFH->get_error_func(CFH));
|
2020-06-19 22:46:07 +02:00
|
|
|
}
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a byte of data from the archive.
|
|
|
|
* Called by the archiver to read bytes & integers from the archive.
|
|
|
|
* These routines are only used to read & write headers & TOC.
|
|
|
|
* EOF should be treated as a fatal error.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
_ReadByte(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH = ctx->dataFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
return CFH->getc_func(CFH);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write a buffer of data to the archive.
|
|
|
|
* Called by the archiver to write a block of bytes to the TOC or a data file.
|
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2011-01-23 22:10:15 +01:00
|
|
|
_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH = ctx->dataFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2020-06-19 22:46:07 +02:00
|
|
|
errno = 0;
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!CFH->write_func(buf, len, CFH))
|
2020-06-19 22:46:07 +02:00
|
|
|
{
|
|
|
|
/* if write didn't set errno, assume problem is no disk space */
|
|
|
|
if (errno == 0)
|
|
|
|
errno = ENOSPC;
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not write to output file: %s",
|
2023-02-23 18:33:30 +01:00
|
|
|
CFH->get_error_func(CFH));
|
2020-06-19 22:46:07 +02:00
|
|
|
}
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a block of bytes from the archive.
|
|
|
|
*
|
|
|
|
* Called by the archiver to read a block of bytes from the archive
|
|
|
|
*/
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2011-01-23 22:10:15 +01:00
|
|
|
_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH = ctx->dataFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
/*
|
2023-02-23 18:33:30 +01:00
|
|
|
* If there was an I/O error, we already exited in readF(), so here we
|
2014-05-06 02:27:16 +02:00
|
|
|
* exit on short reads.
|
|
|
|
*/
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!CFH->read_func(buf, len, NULL, CFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not read from input file: end of file");
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Close the archive.
 *
 * When writing the archive, this is the routine that actually starts
 * the process of saving it to files. No data should be written prior
 * to this point, since the user could sort the TOC after creating it.
 *
 * If an archive is to be written, this routine must call:
 *		WriteHead			to save the archive header
 *		WriteToc			to save the TOC entries
 *		WriteDataChunks		to save all data & LOs.
 */
static void
_CloseArchive(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	if (AH->mode == archModeWrite)
	{
		CompressFileHandle *tocFH;
		pg_compress_specification compression_spec = {0};
		char		fname[MAXPGPATH];

		setFilePath(AH, fname, "toc.dat");

		/* this will actually fork the processes for a parallel backup */
		ctx->pstate = ParallelBackupStart(AH);

		/* The TOC is always created uncompressed */
		compression_spec.algorithm = PG_COMPRESSION_NONE;
		tocFH = InitCompressFileHandle(compression_spec);
		if (!tocFH->open_write_func(fname, PG_BINARY_W, tocFH))
			pg_fatal("could not open output file \"%s\": %m", fname);
		/* _WriteByte/_WriteBuf route through ctx->dataFH while writing TOC */
		ctx->dataFH = tocFH;

		/*
		 * Write 'tar' in the format field of the toc.dat file. The directory
		 * is compatible with 'tar', so there's no point having a different
		 * format code for it.
		 */
		AH->format = archTar;
		WriteHead(AH);
		AH->format = archDirectory;
		WriteToc(AH);
		if (!EndCompressFileHandle(tocFH))
			pg_fatal("could not close TOC file: %m");
		/* Data files are written by the (possibly parallel) workers. */
		WriteDataChunks(AH, ctx->pstate);

		ParallelBackupEnd(AH, ctx->pstate);

		/*
		 * In directory mode, there is no need to sync all the entries
		 * individually. Just recurse once through all the files generated.
		 */
		if (AH->dosync)
			sync_dir_recurse(ctx->directory, AH->sync_method);
	}
	AH->FH = NULL;
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
|
|
|
|
* Reopen the archive's file handle.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_ReopenArchive(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Our TOC is in memory, our data files are opened by each child anyway as
|
|
|
|
* they are separate. We support reopening the archive by just doing
|
|
|
|
* nothing.
|
|
|
|
*/
|
|
|
|
}
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* LO support
|
2011-01-23 22:10:15 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when starting to save all BLOB DATA (not schema).
|
|
|
|
* It is called just prior to the dumper's DataDumper routine.
|
|
|
|
*
|
|
|
|
* We open the large object TOC file here, so that we can append a line to
|
2022-12-05 08:52:11 +01:00
|
|
|
* it for each LO.
|
2011-01-23 22:10:15 +01:00
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_StartLOs(ArchiveHandle *AH, TocEntry *te)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
Switch pg_dump to use compression specifications
Compression specifications are currently used by pg_basebackup and
pg_receivewal, and are able to let the user control in an extended way
the method and level of compression used. As an effect of this commit,
pg_dump's -Z/--compress is now able to use more than just an integer, as
of the grammar "method[:detail]".
The method can be either "none" or "gzip", and can optionally take a
detail string. If the detail string is only an integer, it defines the
compression level. A comma-separated list of keywords can also be used
method allows for more options, the only keyword supported now is
"level".
The change is backward-compatible, hence specifying only an integer
leads to no compression for a level of 0 and gzip compression when the
level is greater than 0.
Most of the code changes are straight-forward, as pg_dump was relying on
an integer tracking the compression level to check for gzip or no
compression. These are changed to use a compression specification and
the algorithm stored in it.
As of this change, note that the dump format is not bumped because there
is no need yet to track the compression algorithm in the TOC entries.
Hence, we still rely on the compression level to make the difference
when reading them. This will be mandatory once a new compression method
is added, though.
In order to keep the code simpler when parsing the compression
specification, the code is changed so as pg_dump now fails hard when
using gzip on -Z/--compress without its support compiled, rather than
enforcing no compression without the user knowing about it except
through a warning. Like before this commit, archive and custom formats
are compressed by default when the code is compiled with gzip, and left
uncompressed without gzip.
Author: Georgios Kokolatos
Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/O4mutIrCES8ZhlXJiMvzsivT7ztAMja2lkdL1LJx6O5f22I2W8PBIeLKz7mDLwxHoibcnRAYJXm1pH4tyUNC4a8eDzLn22a6Pb1S74Niexg=@pm.me
2022-12-02 02:45:02 +01:00
|
|
|
pg_compress_specification compression_spec = {0};
|
2013-03-24 16:27:20 +01:00
|
|
|
char fname[MAXPGPATH];
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
setFilePath(AH, fname, "blobs.toc");
|
2011-01-23 22:10:15 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
/* The LO TOC file is never compressed */
|
Switch pg_dump to use compression specifications
Compression specifications are currently used by pg_basebackup and
pg_receivewal, and are able to let the user control in an extended way
the method and level of compression used. As an effect of this commit,
pg_dump's -Z/--compress is now able to use more than just an integer, as
of the grammar "method[:detail]".
The method can be either "none" or "gzip", and can optionally take a
detail string. If the detail string is only an integer, it defines the
compression level. A comma-separated list of keywords can also be used
method allows for more options, the only keyword supported now is
"level".
The change is backward-compatible, hence specifying only an integer
leads to no compression for a level of 0 and gzip compression when the
level is greater than 0.
Most of the code changes are straight-forward, as pg_dump was relying on
an integer tracking the compression level to check for gzip or no
compression. These are changed to use a compression specification and
the algorithm stored in it.
As of this change, note that the dump format is not bumped because there
is no need yet to track the compression algorithm in the TOC entries.
Hence, we still rely on the compression level to make the difference
when reading them. This will be mandatory once a new compression method
is added, though.
In order to keep the code simpler when parsing the compression
specification, the code is changed so as pg_dump now fails hard when
using gzip on -Z/--compress without its support compiled, rather than
enforcing no compression without the user knowing about it except
through a warning. Like before this commit, archive and custom formats
are compressed by default when the code is compiled with gzip, and left
uncompressed without gzip.
Author: Georgios Kokolatos
Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/O4mutIrCES8ZhlXJiMvzsivT7ztAMja2lkdL1LJx6O5f22I2W8PBIeLKz7mDLwxHoibcnRAYJXm1pH4tyUNC4a8eDzLn22a6Pb1S74Niexg=@pm.me
2022-12-02 02:45:02 +01:00
|
|
|
compression_spec.algorithm = PG_COMPRESSION_NONE;
|
2023-02-23 18:33:30 +01:00
|
|
|
ctx->LOsTocFH = InitCompressFileHandle(compression_spec);
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!ctx->LOsTocFH->open_write_func(fname, "ab", ctx->LOsTocFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open output file \"%s\": %m", fname);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Called by the archiver when we're about to start dumping a LO.
|
2011-01-23 22:10:15 +01:00
|
|
|
*
|
2022-12-05 08:52:11 +01:00
|
|
|
* We create a file to write the LO to.
|
2011-01-23 22:10:15 +01:00
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
char fname[MAXPGPATH];
|
|
|
|
|
|
|
|
snprintf(fname, MAXPGPATH, "%s/blob_%u.dat", ctx->directory, oid);
|
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
ctx->dataFH = InitCompressFileHandle(AH->compression_spec);
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!ctx->dataFH->open_write_func(fname, PG_BINARY_W, ctx->dataFH))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open output file \"%s\": %m", fname);
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Called by the archiver when the dumper is finished writing a LO.
|
2011-01-23 22:10:15 +01:00
|
|
|
*
|
2022-12-05 08:52:11 +01:00
|
|
|
* We close the LO file and write an entry to the LO TOC file for it.
|
2011-01-23 22:10:15 +01:00
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2023-02-23 18:33:30 +01:00
|
|
|
CompressFileHandle *CFH = ctx->LOsTocFH;
|
2011-01-23 22:10:15 +01:00
|
|
|
char buf[50];
|
|
|
|
int len;
|
|
|
|
|
2023-02-23 18:33:30 +01:00
|
|
|
/* Close the BLOB data file itself */
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!EndCompressFileHandle(ctx->dataFH))
|
2023-02-24 08:49:28 +01:00
|
|
|
pg_fatal("could not close LO data file: %m");
|
2011-01-23 22:10:15 +01:00
|
|
|
ctx->dataFH = NULL;
|
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
/* register the LO in blobs.toc */
|
2011-01-23 22:10:15 +01:00
|
|
|
len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid);
|
Improve type handling in pg_dump's compress file API
After 0da243fed0 got committed, we've received a report about a compiler
warning, related to the new LZ4File_gets() function:
compress_lz4.c: In function 'LZ4File_gets':
compress_lz4.c:492:19: warning: comparison of unsigned expression in
'< 0' is always false [-Wtype-limits]
492 | if (dsize < 0)
The reason is very simple - dsize is declared as size_t, which is an
unsigned integer, and thus the check is pointless and we might fail to
notice an error in some cases (or fail in a strange way a bit later).
The warning could have been silenced by simply changing the type, but we
realized the API mostly assumes all the libraries use the same types and
report errors the same way (e.g. by returning 0 and/or negative value).
But we can't make this assumption - the gzip/lz4 libraries already
disagree on some of this, and even if they did a library added in the
future might not.
The right solution is to define what the API does, and translate the
library-specific behavior in consistent way (so that the internal errors
are not exposed to users of our compression API). So this adjusts the
data types in a couple places, so that we don't miss library errors, and
simplifies and unifies the error reporting to simply return true/false
(instead of e.g. size_t).
While at it, make sure LZ4File_open_write() does not clobber errno in
case open_func() fails.
Author: Georgios Kokolatos
Reported-by: Alexander Lakhin
Reviewed-by: Tomas Vondra, Justin Pryzby
Discussion: https://postgr.es/m/33496f7c-3449-1426-d568-63f6bca2ac1f@gmail.com
2023-03-23 17:51:55 +01:00
|
|
|
if (!CFH->write_func(buf, len, CFH))
|
2023-05-17 18:55:51 +02:00
|
|
|
{
|
|
|
|
/* if write didn't set errno, assume problem is no disk space */
|
|
|
|
if (errno == 0)
|
|
|
|
errno = ENOSPC;
|
|
|
|
pg_fatal("could not write to LOs TOC file: %s",
|
|
|
|
CFH->get_error_func(CFH));
|
|
|
|
}
|
2011-01-23 22:10:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Called by the archiver when finishing saving all BLOB DATA.
 *
 * We close the LOs TOC file.
 */
static void
_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	/* EndCompressFileHandle() flushes, closes, and frees the handle. */
	if (!EndCompressFileHandle(ctx->LOsTocFH))
		pg_fatal("could not close LOs TOC file: %m");
	/* Clear the stale pointer so later code can't use the freed handle. */
	ctx->LOsTocFH = NULL;
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
|
|
|
|
* Gets a relative file name and prepends the output directory, writing the
|
|
|
|
* result to buf. The caller needs to make sure that buf is MAXPGPATH bytes
|
|
|
|
* big. Can't use a static char[MAXPGPATH] inside the function because we run
|
|
|
|
* multithreaded on Windows.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
setFilePath(ArchiveHandle *AH, char *buf, const char *relativeFilename)
|
2011-01-23 22:10:15 +01:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
char *dname;
|
|
|
|
|
|
|
|
dname = ctx->directory;
|
|
|
|
|
|
|
|
if (strlen(dname) + 1 + strlen(relativeFilename) + 1 > MAXPGPATH)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("file name too long: \"%s\"", dname);
|
2011-01-23 22:10:15 +01:00
|
|
|
|
|
|
|
strcpy(buf, dname);
|
|
|
|
strcat(buf, "/");
|
|
|
|
strcat(buf, relativeFilename);
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
/*
|
|
|
|
* Prepare for parallel restore.
|
|
|
|
*
|
|
|
|
* The main thing that needs to happen here is to fill in TABLE DATA and BLOBS
|
|
|
|
* TOC entries' dataLength fields with appropriate values to guide the
|
|
|
|
* ordering of restore jobs. The source of said data is format-dependent,
|
|
|
|
* as is the exact meaning of the values.
|
|
|
|
*
|
|
|
|
* A format module might also choose to do other setup here.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_PrepParallelRestore(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
TocEntry *te;
|
|
|
|
|
|
|
|
for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
char fname[MAXPGPATH];
|
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A dumpable object has set tctx->filename, any other object has not.
|
|
|
|
* (see _ArchiveEntry).
|
|
|
|
*/
|
|
|
|
if (tctx->filename == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* We may ignore items not due to be restored */
|
|
|
|
if ((te->reqs & REQ_DATA) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Stat the file and, if successful, put its size in dataLength. When
|
|
|
|
* using compression, the physical file size might not be a very good
|
|
|
|
* guide to the amount of work involved in restoring the file, but we
|
|
|
|
* only need an approximate indicator of that.
|
|
|
|
*/
|
|
|
|
setFilePath(AH, fname, tctx->filename);
|
|
|
|
|
|
|
|
if (stat(fname, &st) == 0)
|
|
|
|
te->dataLength = st.st_size;
|
2023-02-23 21:19:19 +01:00
|
|
|
else if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
{
|
2023-02-23 21:19:19 +01:00
|
|
|
if (AH->compression_spec.algorithm == PG_COMPRESSION_GZIP)
|
|
|
|
strlcat(fname, ".gz", sizeof(fname));
|
|
|
|
else if (AH->compression_spec.algorithm == PG_COMPRESSION_LZ4)
|
|
|
|
strlcat(fname, ".lz4", sizeof(fname));
|
2023-04-05 21:38:04 +02:00
|
|
|
else if (AH->compression_spec.algorithm == PG_COMPRESSION_ZSTD)
|
|
|
|
strlcat(fname, ".zst", sizeof(fname));
|
2023-02-23 21:19:19 +01:00
|
|
|
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
if (stat(fname, &st) == 0)
|
|
|
|
te->dataLength = st.st_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this is the BLOBS entry, what we stat'd was blobs.toc, which
|
|
|
|
* most likely is a lot smaller than the actual blob data. We don't
|
|
|
|
* have a cheap way to estimate how much smaller, but fortunately it
|
2022-12-05 08:52:11 +01:00
|
|
|
* doesn't matter too much as long as we get the LOs processed
|
Improve parallel scheduling logic in pg_dump/pg_restore.
Previously, the way this worked was that a parallel pg_dump would
re-order the TABLE_DATA items in the dump's TOC into decreasing size
order, and separately re-order (some of) the INDEX items into decreasing
size order. Then pg_dump would dump the items in that order. Later,
parallel pg_restore just followed the TOC order. This method had lots
of deficiencies:
* TOC ordering randomly differed between parallel and non-parallel
dumps, and was hard to predict in the former case, causing problems
for building stable pg_dump test cases.
* Parallel restore only followed a well-chosen order if the dump had
been done in parallel; in particular, this never happened for restore
from custom-format dumps.
* The best order for restore isn't necessarily the same as for dump,
and it's not really static either because of locking considerations.
* TABLE_DATA and INDEX items aren't the only things that might take a lot
of work during restore. Scheduling was particularly stupid for the BLOBS
item, which might require lots of work during dump as well as restore,
but was left to the end in either case.
This patch removes the logic that changed the TOC order, fixing the
test instability problem. Instead, we sort the parallelizable items
just before processing them during a parallel dump. Independently
of that, parallel restore prioritizes the ready-to-execute tasks
based on the size of the underlying table. In the case of dependent
tasks such as index, constraint, or foreign key creation, the largest
relevant table is used as the metric for estimating the task length.
(This is pretty crude, but it should be enough to avoid the case we
want to avoid, which is ending the run with just a few large tasks
such that we can't make use of all N workers.)
Patch by me, responding to a complaint from Peter Eisentraut,
who also reviewed the patch.
Discussion: https://postgr.es/m/5137fe12-d0a2-4971-61b6-eb4e7e8875f8@2ndquadrant.com
2018-09-14 23:31:51 +02:00
|
|
|
* reasonably early. Arbitrarily scale up by a factor of 1K.
|
|
|
|
*/
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0)
|
|
|
|
te->dataLength *= 1024;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
 * Clone format-specific fields during parallel restoration.
 */
static void
_Clone(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	/* Give the cloned handle its own private copy of the format data. */
	AH->formatData = (lclContext *) pg_malloc(sizeof(lclContext));
	memcpy(AH->formatData, ctx, sizeof(lclContext));
	ctx = (lclContext *) AH->formatData;

	/*
	 * Note: we do not make a local lo_buf because we expect at most one BLOBS
	 * entry per archive, so no parallelism is possible.  Likewise,
	 * TOC-entry-local state isn't an issue because any one TOC entry is
	 * touched by just one worker child.
	 */

	/*
	 * We also don't copy the ParallelState pointer (pstate), only the leader
	 * process ever writes to it.
	 */
}
|
|
|
|
|
|
|
|
/*
 * Release the format-specific data belonging to a cloned archive handle
 * (counterpart of _Clone above).
 */
static void
_DeClone(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	free(ctx);
}
|
|
|
|
|
|
|
|
/*
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
* This function is executed in the child of a parallel backup for a
|
|
|
|
* directory-format archive and dumps the actual data for one TOC entry.
|
2013-03-24 16:27:20 +01:00
|
|
|
*/
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
static int
|
2016-01-13 23:48:33 +01:00
|
|
|
_WorkerJobDumpDirectory(ArchiveHandle *AH, TocEntry *te)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* This function returns void. We either fail and die horribly or
|
|
|
|
* succeed... A failure will be detected by the parent when the child dies
|
|
|
|
* unexpectedly.
|
|
|
|
*/
|
2016-01-13 23:48:33 +01:00
|
|
|
WriteDataChunksForTocEntry(AH, te);
|
2013-03-24 16:27:20 +01:00
|
|
|
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
return 0;
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
* This function is executed in the child of a parallel restore from a
|
|
|
|
* directory-format archive and restores the actual data for one TOC entry.
|
2013-03-24 16:27:20 +01:00
|
|
|
*/
|
|
|
|
static int
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
_WorkerJobRestoreDirectory(ArchiveHandle *AH, TocEntry *te)
|
2013-03-24 16:27:20 +01:00
|
|
|
{
|
Rationalize parallel dump/restore's handling of worker cmd/status messages.
The existing APIs for creating and parsing command and status messages are
rather messy; for example, archive-format modules have to provide code
for constructing command messages, which is entirely pointless since
the code to read them is hard-wired in WaitForCommands() and hence
no format-specific variation is actually possible. But there's little
foreseeable reason to need format-specific variation anyway.
The situation for status messages is no better; at least those are both
constructed and parsed by format-specific code, but said code is quite
redundant since there's no actual need for format-specific variation.
To add insult to injury, the first API involves returning pointers to
static buffers, which is bad, while the second involves returning pointers
to malloc'd strings, which is safer but randomly inconsistent.
Hence, get rid of the MasterStartParallelItem and MasterEndParallelItem
APIs, and instead write centralized functions that construct and parse
command and status messages. If we ever do need more flexibility, these
functions can be the standard implementations of format-specific
callback methods, but that's a long way off if it ever happens.
Tom Lane, reviewed by Kevin Grittner
Discussion: <17340.1464465717@sss.pgh.pa.us>
2016-09-27 19:56:04 +02:00
|
|
|
return parallel_restore(AH, te);
|
2013-03-24 16:27:20 +01:00
|
|
|
}
|