2000-07-21 13:43:26 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* pg_backup_tar.c
|
|
|
|
*
|
|
|
|
* This file is copied from the 'files' format file, but dumps data into
|
|
|
|
* one temp file then sends it to the output TAR archive.
|
2011-01-23 22:10:15 +01:00
|
|
|
*
|
2013-01-17 05:49:54 +01:00
|
|
|
* The tar format also includes a 'restore.sql' script which is there for
|
|
|
|
* the benefit of humans. This script is never used by pg_restore.
|
|
|
|
*
|
2011-01-23 22:10:15 +01:00
|
|
|
* NOTE: If you untar the created 'tar' file, the resulting files are
|
|
|
|
* compatible with the 'directory' format. Please keep the two formats in
|
|
|
|
* sync.
|
2000-07-21 13:43:26 +02:00
|
|
|
*
|
2013-01-17 05:49:54 +01:00
|
|
|
* See the headers to pg_backup_directory & pg_restore for more details.
|
2000-07-21 13:43:26 +02:00
|
|
|
*
|
|
|
|
* Copyright (c) 2000, Philip Warner
|
|
|
|
* Rights are granted to use this software in any way so long
|
|
|
|
* as this notice is not removed.
|
|
|
|
*
|
|
|
|
* The author is not responsible for loss or damages that may
|
2019-05-23 03:17:41 +02:00
|
|
|
* result from its use.
|
2000-07-21 13:43:26 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/bin/pg_dump/pg_backup_tar.c
|
2001-01-12 05:32:07 +01:00
|
|
|
*
|
2000-07-21 13:43:26 +02:00
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "postgres_fe.h"
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2006-06-27 04:56:41 +02:00
|
|
|
#include <sys/stat.h>
|
2000-07-21 13:43:26 +02:00
|
|
|
#include <ctype.h>
|
2003-01-11 00:49:06 +01:00
|
|
|
#include <limits.h>
|
2000-07-21 13:43:26 +02:00
|
|
|
#include <unistd.h>
|
|
|
|
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "common/file_utils.h"
|
|
|
|
#include "fe_utils/string_utils.h"
|
|
|
|
#include "pg_backup_archiver.h"
|
|
|
|
#include "pg_backup_tar.h"
|
|
|
|
#include "pg_backup_utils.h"
|
|
|
|
#include "pgtar.h"
|
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _StartData(ArchiveHandle *AH, TocEntry *te);
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
2000-07-21 13:43:26 +02:00
|
|
|
static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static int _WriteByte(ArchiveHandle *AH, const int i);
|
2022-09-23 01:41:23 +02:00
|
|
|
static int _ReadByte(ArchiveHandle *AH);
|
2014-05-06 02:27:16 +02:00
|
|
|
static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
|
|
static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
|
2016-01-13 23:48:33 +01:00
|
|
|
static void _CloseArchive(ArchiveHandle *AH);
|
|
|
|
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te);
|
2000-07-21 13:43:26 +02:00
|
|
|
static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
|
|
|
|
static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
|
|
static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
|
|
static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
#define K_STD_BUF_SIZE 1024
|
|
|
|
|
|
|
|
|
2012-03-02 21:30:01 +01:00
|
|
|
/*
 * State for one member file within the tar archive, or for the temp file
 * that stands in for it while the member is being written.
 */
typedef struct
{
	FILE	   *nFH;			/* handle actually used for reads/writes;
								 * points at tarFH (read mode) or tmpFH
								 * (write mode) -- see tarOpen() */
	FILE	   *tarFH;			/* the enclosing archive's file handle */
	FILE	   *tmpFH;			/* temp file holding data until it is
								 * appended to the archive at close time */
	char	   *targetFile;		/* member name within the archive
								 * (pg_strdup'd; freed by tarClose) */
	char		mode;			/* 'r' or 'w' */
	pgoff_t		pos;			/* current offset within this member */
	pgoff_t		fileLen;		/* logical length of the member's data;
								 * tarRead() never reads past this */
	ArchiveHandle *AH;			/* owning archive handle */
} TAR_MEMBER;
|
|
|
|
|
|
|
|
/*
 * Format-private state, hung off ArchiveHandle->formatData.
 */
typedef struct
{
	int			hasSeek;		/* result of checkSeek() on the archive */
	pgoff_t		filePos;		/* initialized to 0 in InitArchiveFmt_Tar;
								 * NOTE(review): not updated in the code
								 * visible here -- confirm its role */
	TAR_MEMBER *loToc;			/* presumably the member holding the large-
								 * object TOC -- TODO confirm in _LoadLOs */
	FILE	   *tarFH;			/* the archive file itself (file, stdin, or
								 * stdout) */
	pgoff_t		tarFHpos;		/* current physical offset in tarFH,
								 * maintained by _tarReadRaw() */
	pgoff_t		tarNextMember;	/* presumably the offset of the next member
								 * header -- confirm against _tarPositionTo */
	TAR_MEMBER *FH;				/* member currently being read (e.g.
								 * "toc.dat" during ReadToc) */
	int			isSpecialScript;	/* nonzero while emitting the special
									 * restore.sql script */
	TAR_MEMBER *scriptTH;		/* member handle for that restore script */
} lclContext;
|
|
|
|
|
|
|
|
/*
 * Per-TOC-entry private state (TocEntry->formatData).
 */
typedef struct
{
	TAR_MEMBER *TH;				/* open member for this entry's data, if any */
	char	   *filename;		/* member file name ("<dumpId>.dat"), or NULL
								 * when the entry carries no data */
} lclTocEntry;
|
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
static void _LoadLOs(ArchiveHandle *AH);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode);
|
2022-09-23 01:41:23 +02:00
|
|
|
static void tarClose(ArchiveHandle *AH, TAR_MEMBER *th);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
#ifdef __NOT_USED__
|
2002-08-20 19:54:45 +02:00
|
|
|
static char *tarGets(char *buf, size_t len, TAR_MEMBER *th);
|
2000-07-21 13:43:26 +02:00
|
|
|
#endif
|
2020-08-25 07:24:15 +02:00
|
|
|
static int tarPrintf(TAR_MEMBER *th, const char *fmt,...) pg_attribute_printf(2, 3);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
static void _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th);
|
|
|
|
static TAR_MEMBER *_tarPositionTo(ArchiveHandle *AH, const char *filename);
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t tarRead(void *buf, size_t len, TAR_MEMBER *th);
|
|
|
|
static size_t tarWrite(const void *buf, size_t len, TAR_MEMBER *th);
|
2000-07-21 13:43:26 +02:00
|
|
|
static void _tarWriteHeader(TAR_MEMBER *th);
|
|
|
|
static int _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th);
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t _scriptOut(ArchiveHandle *AH, const void *buf, size_t len);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initializer
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
InitArchiveFmt_Tar(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
/* Assuming static functions, this can be copied for each format. */
|
|
|
|
AH->ArchiveEntryPtr = _ArchiveEntry;
|
|
|
|
AH->StartDataPtr = _StartData;
|
|
|
|
AH->WriteDataPtr = _WriteData;
|
|
|
|
AH->EndDataPtr = _EndData;
|
|
|
|
AH->WriteBytePtr = _WriteByte;
|
|
|
|
AH->ReadBytePtr = _ReadByte;
|
|
|
|
AH->WriteBufPtr = _WriteBuf;
|
|
|
|
AH->ReadBufPtr = _ReadBuf;
|
|
|
|
AH->ClosePtr = _CloseArchive;
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->ReopenPtr = NULL;
|
2000-07-21 13:43:26 +02:00
|
|
|
AH->PrintTocDataPtr = _PrintTocData;
|
|
|
|
AH->ReadExtraTocPtr = _ReadExtraToc;
|
|
|
|
AH->WriteExtraTocPtr = _WriteExtraToc;
|
|
|
|
AH->PrintExtraTocPtr = _PrintExtraToc;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
AH->StartLOsPtr = _StartLOs;
|
|
|
|
AH->StartLOPtr = _StartLO;
|
|
|
|
AH->EndLOPtr = _EndLO;
|
|
|
|
AH->EndLOsPtr = _EndLOs;
|
2009-02-02 21:07:37 +01:00
|
|
|
AH->ClonePtr = NULL;
|
|
|
|
AH->DeClonePtr = NULL;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
AH->WorkerJobDumpPtr = NULL;
|
|
|
|
AH->WorkerJobRestorePtr = NULL;
|
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
/*
|
|
|
|
* Set up some special context used in compressing data.
|
|
|
|
*/
|
2022-09-12 08:31:56 +02:00
|
|
|
ctx = pg_malloc0_object(lclContext);
|
2000-07-21 13:43:26 +02:00
|
|
|
AH->formatData = (void *) ctx;
|
|
|
|
ctx->filePos = 0;
|
2003-10-08 05:52:32 +02:00
|
|
|
ctx->isSpecialScript = 0;
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2002-04-24 04:21:04 +02:00
|
|
|
/* Initialize LO buffering */
|
|
|
|
AH->lo_buf_size = LOBBUFSIZE;
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
/*
|
2011-01-23 22:10:15 +01:00
|
|
|
* Now open the tar file, and load the TOC if we're in read mode.
|
2000-07-21 13:43:26 +02:00
|
|
|
*/
|
|
|
|
if (AH->mode == archModeWrite)
|
|
|
|
{
|
|
|
|
if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:43:26 +02:00
|
|
|
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
|
2007-10-28 22:55:52 +01:00
|
|
|
if (ctx->tarFH == NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open TOC file \"%s\" for output: %m",
|
|
|
|
AH->fSpec);
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
else
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:43:26 +02:00
|
|
|
ctx->tarFH = stdout;
|
2007-10-28 22:55:52 +01:00
|
|
|
if (ctx->tarFH == NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open TOC file for output: %m");
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2001-01-12 05:32:07 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
ctx->tarFHpos = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make unbuffered since we will dup() it, and the buffers screw each
|
|
|
|
* other
|
|
|
|
*/
|
2000-08-01 17:51:45 +02:00
|
|
|
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2002-10-25 03:33:17 +02:00
|
|
|
ctx->hasSeek = checkSeek(ctx->tarFH);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We don't support compression because reading the files back is not
|
|
|
|
* possible since gzdopen uses buffered IO which totally screws file
|
|
|
|
* positioning.
|
|
|
|
*/
|
Switch pg_dump to use compression specifications
Compression specifications are currently used by pg_basebackup and
pg_receivewal, and are able to let the user control in an extended way
the method and level of compression used. As an effect of this commit,
pg_dump's -Z/--compress is now able to use more than just an integer, as
of the grammar "method[:detail]".
The method can be either "none" or "gzip", and can optionally take a
detail string. If the detail string is only an integer, it defines the
compression level. A comma-separated list of keywords can also be used
method allows for more options, the only keyword supported now is
"level".
The change is backward-compatible, hence specifying only an integer
leads to no compression for a level of 0 and gzip compression when the
level is greater than 0.
Most of the code changes are straight-forward, as pg_dump was relying on
an integer tracking the compression level to check for gzip or no
compression. These are changed to use a compression specification and
the algorithm stored in it.
As of this change, note that the dump format is not bumped because there
is no need yet to track the compression algorithm in the TOC entries.
Hence, we still rely on the compression level to make the difference
when reading them. This will be mandatory once a new compression method
is added, though.
In order to keep the code simpler when parsing the compression
specification, the code is changed so as pg_dump now fails hard when
using gzip on -Z/--compress without its support compiled, rather than
enforcing no compression without the user knowing about it except
through a warning. Like before this commit, archive and custom formats
are compressed by default when the code is compiled with gzip, and left
uncompressed without gzip.
Author: Georgios Kokolatos
Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/O4mutIrCES8ZhlXJiMvzsivT7ztAMja2lkdL1LJx6O5f22I2W8PBIeLKz7mDLwxHoibcnRAYJXm1pH4tyUNC4a8eDzLn22a6Pb1S74Niexg=@pm.me
2022-12-02 02:45:02 +01:00
|
|
|
if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("compression is not supported by tar archive format");
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{ /* Read Mode */
|
|
|
|
if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:43:26 +02:00
|
|
|
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_R);
|
2007-10-28 22:55:52 +01:00
|
|
|
if (ctx->tarFH == NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open TOC file \"%s\" for input: %m",
|
|
|
|
AH->fSpec);
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
else
|
2007-10-28 22:55:52 +01:00
|
|
|
{
|
2000-07-21 13:43:26 +02:00
|
|
|
ctx->tarFH = stdin;
|
2007-10-28 22:55:52 +01:00
|
|
|
if (ctx->tarFH == NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not open TOC file for input: %m");
|
2007-10-28 22:55:52 +01:00
|
|
|
}
|
2001-01-12 05:32:07 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
/*
|
|
|
|
* Make unbuffered since we will dup() it, and the buffers screw each
|
|
|
|
* other
|
|
|
|
*/
|
2000-08-01 17:51:45 +02:00
|
|
|
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
ctx->tarFHpos = 0;
|
|
|
|
|
2002-10-25 03:33:17 +02:00
|
|
|
ctx->hasSeek = checkSeek(ctx->tarFH);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
ctx->FH = (void *) tarOpen(AH, "toc.dat", 'r');
|
|
|
|
ReadHead(AH);
|
|
|
|
ReadToc(AH);
|
|
|
|
tarClose(AH, ctx->FH); /* Nothing else in the file... */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* - Start a new TOC entry
|
|
|
|
* Setup the output file name.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx;
|
|
|
|
char fn[K_STD_BUF_SIZE];
|
|
|
|
|
2022-09-12 08:31:56 +02:00
|
|
|
ctx = pg_malloc0_object(lclTocEntry);
|
2001-04-14 15:11:03 +02:00
|
|
|
if (te->dataDumper != NULL)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
2022-03-31 03:34:10 +02:00
|
|
|
snprintf(fn, sizeof(fn), "%d.dat", te->dumpId);
|
2011-11-25 21:40:51 +01:00
|
|
|
ctx->filename = pg_strdup(fn);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ctx->filename = NULL;
|
|
|
|
ctx->TH = NULL;
|
|
|
|
}
|
|
|
|
te->formatData = (void *) ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
if (ctx->filename)
|
|
|
|
WriteStr(AH, ctx->filename);
|
|
|
|
else
|
|
|
|
WriteStr(AH, "");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
if (ctx == NULL)
|
|
|
|
{
|
2022-09-12 08:31:56 +02:00
|
|
|
ctx = pg_malloc0_object(lclTocEntry);
|
2000-07-21 13:43:26 +02:00
|
|
|
te->formatData = (void *) ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->filename = ReadStr(AH);
|
|
|
|
if (strlen(ctx->filename) == 0)
|
|
|
|
{
|
|
|
|
free(ctx->filename);
|
|
|
|
ctx->filename = NULL;
|
|
|
|
}
|
|
|
|
ctx->TH = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
2004-03-03 22:28:55 +01:00
|
|
|
if (AH->public.verbose && ctx->filename != NULL)
|
2001-04-14 15:11:03 +02:00
|
|
|
ahprintf(AH, "-- File: %s\n", ctx->filename);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_StartData(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
tctx->TH = tarOpen(AH, tctx->filename, 'w');
|
|
|
|
}
|
|
|
|
|
|
|
|
static TAR_MEMBER *
|
|
|
|
tarOpen(ArchiveHandle *AH, const char *filename, char mode)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
TAR_MEMBER *tm;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
if (mode == 'r')
|
|
|
|
{
|
|
|
|
tm = _tarPositionTo(AH, filename);
|
|
|
|
if (!tm) /* Not found */
|
|
|
|
{
|
2010-02-23 17:55:22 +01:00
|
|
|
if (filename)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Couldn't find the requested file. Future: do SEEK(0) and
|
|
|
|
* retry.
|
|
|
|
*/
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not find file \"%s\" in archive", filename);
|
2010-02-23 17:55:22 +01:00
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
else
|
2010-02-23 17:55:22 +01:00
|
|
|
{
|
|
|
|
/* Any file OK, none left, so return NULL */
|
2000-07-21 13:43:26 +02:00
|
|
|
return NULL;
|
2010-02-23 17:55:22 +01:00
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
Switch pg_dump to use compression specifications
Compression specifications are currently used by pg_basebackup and
pg_receivewal, and are able to let the user control in an extended way
the method and level of compression used. As an effect of this commit,
pg_dump's -Z/--compress is now able to use more than just an integer, as
of the grammar "method[:detail]".
The method can be either "none" or "gzip", and can optionally take a
detail string. If the detail string is only an integer, it defines the
compression level. A comma-separated list of keywords can also be used
method allows for more options, the only keyword supported now is
"level".
The change is backward-compatible, hence specifying only an integer
leads to no compression for a level of 0 and gzip compression when the
level is greater than 0.
Most of the code changes are straight-forward, as pg_dump was relying on
an integer tracking the compression level to check for gzip or no
compression. These are changed to use a compression specification and
the algorithm stored in it.
As of this change, note that the dump format is not bumped because there
is no need yet to track the compression algorithm in the TOC entries.
Hence, we still rely on the compression level to make the difference
when reading them. This will be mandatory once a new compression method
is added, though.
In order to keep the code simpler when parsing the compression
specification, the code is changed so as pg_dump now fails hard when
using gzip on -Z/--compress without its support compiled, rather than
enforcing no compression without the user knowing about it except
through a warning. Like before this commit, archive and custom formats
are compressed by default when the code is compiled with gzip, and left
uncompressed without gzip.
Author: Georgios Kokolatos
Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/O4mutIrCES8ZhlXJiMvzsivT7ztAMja2lkdL1LJx6O5f22I2W8PBIeLKz7mDLwxHoibcnRAYJXm1pH4tyUNC4a8eDzLn22a6Pb1S74Niexg=@pm.me
2022-12-02 02:45:02 +01:00
|
|
|
if (AH->compression_spec.algorithm == PG_COMPRESSION_NONE)
|
2000-07-21 13:43:26 +02:00
|
|
|
tm->nFH = ctx->tarFH;
|
|
|
|
else
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("compression is not supported by tar archive format");
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2015-09-21 02:42:27 +02:00
|
|
|
int old_umask;
|
|
|
|
|
2022-09-12 08:31:56 +02:00
|
|
|
tm = pg_malloc0_object(TAR_MEMBER);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2015-09-21 02:42:27 +02:00
|
|
|
/*
|
|
|
|
* POSIX does not require, but permits, tmpfile() to restrict file
|
|
|
|
* permissions. Given an OS crash after we write data, the filesystem
|
|
|
|
* might retain the data but forget tmpfile()'s unlink(). If so, the
|
|
|
|
* file mode protects confidentiality of the data written.
|
|
|
|
*/
|
|
|
|
old_umask = umask(S_IRWXG | S_IRWXO);
|
|
|
|
|
2006-06-27 03:16:58 +02:00
|
|
|
#ifndef WIN32
|
2000-07-21 13:43:26 +02:00
|
|
|
tm->tmpFH = tmpfile();
|
2006-06-27 03:16:58 +02:00
|
|
|
#else
|
2006-10-04 02:30:14 +02:00
|
|
|
|
2006-06-27 03:16:58 +02:00
|
|
|
/*
|
|
|
|
* On WIN32, tmpfile() generates a filename in the root directory,
|
|
|
|
* which requires administrative permissions on certain systems. Loop
|
|
|
|
* until we find a unique file name we can create.
|
|
|
|
*/
|
|
|
|
while (1)
|
|
|
|
{
|
|
|
|
char *name;
|
|
|
|
int fd;
|
2006-10-04 02:30:14 +02:00
|
|
|
|
2006-06-27 03:16:58 +02:00
|
|
|
name = _tempnam(NULL, "pg_temp_");
|
|
|
|
if (name == NULL)
|
|
|
|
break;
|
|
|
|
fd = open(name, O_RDWR | O_CREAT | O_EXCL | O_BINARY |
|
2006-06-27 04:56:41 +02:00
|
|
|
O_TEMPORARY, S_IRUSR | S_IWUSR);
|
2006-06-27 03:16:58 +02:00
|
|
|
free(name);
|
|
|
|
|
|
|
|
if (fd != -1) /* created a file */
|
|
|
|
{
|
|
|
|
tm->tmpFH = fdopen(fd, "w+b");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else if (errno != EEXIST) /* failure other than file exists */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2001-01-12 05:32:07 +01:00
|
|
|
if (tm->tmpFH == NULL)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not generate temporary file name: %m");
|
2001-01-12 05:32:07 +01:00
|
|
|
|
2015-09-21 02:42:27 +02:00
|
|
|
umask(old_umask);
|
|
|
|
|
Switch pg_dump to use compression specifications
Compression specifications are currently used by pg_basebackup and
pg_receivewal, and are able to let the user control in an extended way
the method and level of compression used. As an effect of this commit,
pg_dump's -Z/--compress is now able to use more than just an integer, as
of the grammar "method[:detail]".
The method can be either "none" or "gzip", and can optionally take a
detail string. If the detail string is only an integer, it defines the
compression level. A comma-separated list of keywords can also be used
method allows for more options, the only keyword supported now is
"level".
The change is backward-compatible, hence specifying only an integer
leads to no compression for a level of 0 and gzip compression when the
level is greater than 0.
Most of the code changes are straight-forward, as pg_dump was relying on
an integer tracking the compression level to check for gzip or no
compression. These are changed to use a compression specification and
the algorithm stored in it.
As of this change, note that the dump format is not bumped because there
is no need yet to track the compression algorithm in the TOC entries.
Hence, we still rely on the compression level to make the difference
when reading them. This will be mandatory once a new compression method
is added, though.
In order to keep the code simpler when parsing the compression
specification, the code is changed so as pg_dump now fails hard when
using gzip on -Z/--compress without its support compiled, rather than
enforcing no compression without the user knowing about it except
through a warning. Like before this commit, archive and custom formats
are compressed by default when the code is compiled with gzip, and left
uncompressed without gzip.
Author: Georgios Kokolatos
Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/O4mutIrCES8ZhlXJiMvzsivT7ztAMja2lkdL1LJx6O5f22I2W8PBIeLKz7mDLwxHoibcnRAYJXm1pH4tyUNC4a8eDzLn22a6Pb1S74Niexg=@pm.me
2022-12-02 02:45:02 +01:00
|
|
|
if (AH->compression_spec.algorithm == PG_COMPRESSION_NONE)
|
2000-07-21 13:43:26 +02:00
|
|
|
tm->nFH = tm->tmpFH;
|
2022-03-31 03:34:10 +02:00
|
|
|
else
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("compression is not supported by tar archive format");
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
tm->AH = AH;
|
2011-11-25 21:40:51 +01:00
|
|
|
tm->targetFile = pg_strdup(filename);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
tm->mode = mode;
|
|
|
|
tm->tarFH = ctx->tarFH;
|
|
|
|
|
|
|
|
return tm;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tarClose(ArchiveHandle *AH, TAR_MEMBER *th)
|
|
|
|
{
|
Switch pg_dump to use compression specifications
Compression specifications are currently used by pg_basebackup and
pg_receivewal, and are able to let the user control in an extended way
the method and level of compression used. As an effect of this commit,
pg_dump's -Z/--compress is now able to use more than just an integer, as
of the grammar "method[:detail]".
The method can be either "none" or "gzip", and can optionally take a
detail string. If the detail string is only an integer, it defines the
compression level. A comma-separated list of keywords can also be used
method allows for more options, the only keyword supported now is
"level".
The change is backward-compatible, hence specifying only an integer
leads to no compression for a level of 0 and gzip compression when the
level is greater than 0.
Most of the code changes are straight-forward, as pg_dump was relying on
an integer tracking the compression level to check for gzip or no
compression. These are changed to use a compression specification and
the algorithm stored in it.
As of this change, note that the dump format is not bumped because there
is no need yet to track the compression algorithm in the TOC entries.
Hence, we still rely on the compression level to make the difference
when reading them. This will be mandatory once a new compression method
is added, though.
In order to keep the code simpler when parsing the compression
specification, the code is changed so as pg_dump now fails hard when
using gzip on -Z/--compress without its support compiled, rather than
enforcing no compression without the user knowing about it except
through a warning. Like before this commit, archive and custom formats
are compressed by default when the code is compiled with gzip, and left
uncompressed without gzip.
Author: Georgios Kokolatos
Reviewed-by: Michael Paquier
Discussion: https://postgr.es/m/O4mutIrCES8ZhlXJiMvzsivT7ztAMja2lkdL1LJx6O5f22I2W8PBIeLKz7mDLwxHoibcnRAYJXm1pH4tyUNC4a8eDzLn22a6Pb1S74Niexg=@pm.me
2022-12-02 02:45:02 +01:00
|
|
|
if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("compression is not supported by tar archive format");
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
if (th->mode == 'w')
|
|
|
|
_tarAddFile(AH, th); /* This will close the temp file */
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
/*
|
|
|
|
* else Nothing to do for normal read since we don't dup() normal file
|
|
|
|
* handle, and we don't use temp files.
|
|
|
|
*/
|
|
|
|
|
2022-06-16 21:50:56 +02:00
|
|
|
free(th->targetFile);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
th->nFH = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef __NOT_USED__
/*
 * Read one newline-terminated line (up to len bytes) from a tar member.
 * Returns buf, or NULL at logical EOF.  Currently unused.
 */
static char *
tarGets(char *buf, size_t len, TAR_MEMBER *th)
{
	size_t		nread = 0;
	char		c = ' ';
	int			hit_eof = 0;
	char	   *result;

	/* Can't read past logical EOF */
	if (len > (th->fileLen - th->pos))
		len = th->fileLen - th->pos;

	/* Accumulate bytes until the buffer fills or a newline is copied. */
	while (nread < len && c != '\n')
	{
		if (_tarReadRaw(th->AH, &c, 1, th, NULL) <= 0)
		{
			hit_eof = 1;
			break;
		}
		buf[nread++] = c;
	}

	if (hit_eof && nread == 0)
		result = NULL;
	else
	{
		buf[nread++] = '\0';
		result = buf;
	}

	if (result)
		th->pos += strlen(result);

	return result;
}
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Just read bytes from the archive. This is the low level read routine
|
|
|
|
* that is used for ALL reads on a tar file.
|
|
|
|
*/
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t
|
|
|
|
_tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t avail;
|
|
|
|
size_t used = 0;
|
|
|
|
size_t res = 0;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2020-04-27 14:24:20 +02:00
|
|
|
Assert(th || fh);
|
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
avail = AH->lookaheadLen - AH->lookaheadPos;
|
|
|
|
if (avail > 0)
|
|
|
|
{
|
|
|
|
/* We have some lookahead bytes to use */
|
|
|
|
if (avail >= len) /* Just use the lookahead buffer */
|
|
|
|
used = len;
|
|
|
|
else
|
|
|
|
used = avail;
|
|
|
|
|
|
|
|
/* Copy, and adjust buffer pos */
|
2007-08-06 03:38:15 +02:00
|
|
|
memcpy(buf, AH->lookahead + AH->lookaheadPos, used);
|
2000-07-21 13:43:26 +02:00
|
|
|
AH->lookaheadPos += used;
|
|
|
|
|
|
|
|
/* Adjust required length */
|
|
|
|
len -= used;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Read the file if len > 0 */
|
|
|
|
if (len > 0)
|
|
|
|
{
|
|
|
|
if (fh)
|
2014-05-06 02:27:16 +02:00
|
|
|
{
|
2000-07-21 13:43:26 +02:00
|
|
|
res = fread(&((char *) buf)[used], 1, len, fh);
|
2014-05-06 02:27:16 +02:00
|
|
|
if (res != len && !feof(fh))
|
|
|
|
READ_ERROR_EXIT(fh);
|
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
else if (th)
|
|
|
|
{
|
2022-03-31 03:34:10 +02:00
|
|
|
res = fread(&((char *) buf)[used], 1, len, th->nFH);
|
|
|
|
if (res != len && !feof(th->nFH))
|
|
|
|
READ_ERROR_EXIT(th->nFH);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->tarFHpos += res + used;
|
|
|
|
|
|
|
|
return (res + used);
|
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t
|
|
|
|
tarRead(void *buf, size_t len, TAR_MEMBER *th)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t res;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
if (th->pos + len > th->fileLen)
|
|
|
|
len = th->fileLen - th->pos;
|
|
|
|
|
|
|
|
if (len <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
res = _tarReadRaw(th->AH, buf, len, th, NULL);
|
|
|
|
|
|
|
|
th->pos += res;
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t
|
|
|
|
tarWrite(const void *buf, size_t len, TAR_MEMBER *th)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t res;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2022-03-31 03:34:10 +02:00
|
|
|
res = fwrite(buf, 1, len, th->nFH);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
th->pos += res;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2002-08-20 19:54:45 +02:00
|
|
|
_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) AH->currToc->formatData;
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
if (tarWrite(data, dLen, tctx->TH) != dLen)
|
|
|
|
WRITE_ERROR_EXIT;
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
/* Close the file */
|
|
|
|
tarClose(AH, tctx->TH);
|
|
|
|
tctx->TH = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print data for a given file
|
|
|
|
*/
|
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
_PrintFileData(ArchiveHandle *AH, char *filename)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
char buf[4096];
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t cnt;
|
2000-07-21 13:43:26 +02:00
|
|
|
TAR_MEMBER *th;
|
|
|
|
|
|
|
|
if (!filename)
|
|
|
|
return;
|
|
|
|
|
|
|
|
th = tarOpen(AH, filename, 'r');
|
|
|
|
ctx->FH = th;
|
|
|
|
|
|
|
|
while ((cnt = tarRead(buf, 4095, th)) > 0)
|
|
|
|
{
|
|
|
|
buf[cnt] = '\0';
|
|
|
|
ahwrite(buf, 1, cnt, AH);
|
|
|
|
}
|
|
|
|
|
|
|
|
tarClose(AH, th);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print data for a given TOC entry
|
|
|
|
*/
|
|
|
|
static void
|
2016-01-13 23:48:33 +01:00
|
|
|
_PrintTocData(ArchiveHandle *AH, TocEntry *te)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
2012-09-29 23:56:37 +02:00
|
|
|
int pos1;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
if (!tctx->filename)
|
|
|
|
return;
|
|
|
|
|
2012-09-29 23:56:37 +02:00
|
|
|
/*
|
|
|
|
* If we're writing the special restore.sql script, emit a suitable
|
|
|
|
* command to include each table's data from the corresponding file.
|
|
|
|
*
|
|
|
|
* In the COPY case this is a bit klugy because the regular COPY command
|
|
|
|
* was already printed before we get control.
|
|
|
|
*/
|
2000-07-21 13:43:26 +02:00
|
|
|
if (ctx->isSpecialScript)
|
|
|
|
{
|
2012-09-29 23:56:37 +02:00
|
|
|
if (te->copyStmt)
|
|
|
|
{
|
|
|
|
/* Abort the COPY FROM stdin */
|
|
|
|
ahprintf(AH, "\\.\n");
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2012-09-29 23:56:37 +02:00
|
|
|
/*
|
|
|
|
* The COPY statement should look like "COPY ... FROM stdin;\n",
|
|
|
|
* see dumpTableData().
|
|
|
|
*/
|
|
|
|
pos1 = (int) strlen(te->copyStmt) - 13;
|
|
|
|
if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
|
|
|
|
strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("unexpected COPY statement syntax: \"%s\"",
|
|
|
|
te->copyStmt);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2012-09-29 23:56:37 +02:00
|
|
|
/* Emit all but the FROM part ... */
|
|
|
|
ahwrite(te->copyStmt, 1, pos1, AH);
|
|
|
|
/* ... and insert modified FROM */
|
|
|
|
ahprintf(AH, " FROM '$$PATH$$/%s';\n\n", tctx->filename);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* --inserts mode, no worries, just include the data file */
|
|
|
|
ahprintf(AH, "\\i $$PATH$$/%s\n\n", tctx->filename);
|
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strcmp(te->desc, "BLOBS") == 0)
|
2022-12-05 08:52:11 +01:00
|
|
|
_LoadLOs(AH);
|
2000-07-21 13:43:26 +02:00
|
|
|
else
|
2016-01-13 23:48:33 +01:00
|
|
|
_PrintFileData(AH, tctx->filename);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Restore all large objects from the archive.
 *
 * Successive tar members named "blob_<oid>.dat" are read and their contents
 * written out through ahwrite().  Scanning stops at the first non-LO member
 * encountered after at least one LO has been restored (that member will be
 * 'blobs.toc').
 */
static void
_LoadLOs(ArchiveHandle *AH)
{
	Oid			oid;
	lclContext *ctx = (lclContext *) AH->formatData;
	TAR_MEMBER *th;
	size_t		cnt;
	bool		foundLO = false;
	char		buf[4096];

	StartRestoreLOs(AH);

	th = tarOpen(AH, NULL, 'r');	/* Open next file */
	while (th != NULL)
	{
		ctx->FH = th;

		if (strncmp(th->targetFile, "blob_", 5) == 0)
		{
			/* Member name encodes the OID: blob_<oid>.dat */
			oid = atooid(&th->targetFile[5]);
			if (oid != 0)
			{
				pg_log_info("restoring large object with OID %u", oid);

				StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);

				/* Copy the member's data; read 4095 so we can NUL-terminate */
				while ((cnt = tarRead(buf, 4095, th)) > 0)
				{
					buf[cnt] = '\0';
					ahwrite(buf, 1, cnt, AH);
				}
				EndRestoreLO(AH, oid);
				foundLO = true;
			}
			tarClose(AH, th);
		}
		else
		{
			tarClose(AH, th);

			/*
			 * Once we have found the first LO, stop at the first non-LO entry
			 * (which will be 'blobs.toc').  This coding would eat all the
			 * rest of the archive if there are no LOs ... but this function
			 * shouldn't be called at all in that case.
			 */
			if (foundLO)
				break;
		}

		th = tarOpen(AH, NULL, 'r');
	}
	EndRestoreLOs(AH);
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
_WriteByte(ArchiveHandle *AH, const int i)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2000-10-24 15:24:30 +02:00
|
|
|
char b = i; /* Avoid endian problems */
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
if (tarWrite(&b, 1, ctx->FH) != 1)
|
|
|
|
WRITE_ERROR_EXIT;
|
|
|
|
|
|
|
|
ctx->filePos += 1;
|
|
|
|
return 1;
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
_ReadByte(ArchiveHandle *AH)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2007-08-06 03:38:15 +02:00
|
|
|
size_t res;
|
|
|
|
unsigned char c;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
res = tarRead(&c, 1, ctx->FH);
|
2007-08-06 03:38:15 +02:00
|
|
|
if (res != 1)
|
2014-05-06 02:27:16 +02:00
|
|
|
/* We already would have exited for errors on reads, must be EOF */
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not read from input file: end of file");
|
2007-08-06 03:38:15 +02:00
|
|
|
ctx->filePos += 1;
|
2000-07-21 13:43:26 +02:00
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2002-08-20 19:54:45 +02:00
|
|
|
_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
if (tarWrite(buf, len, ctx->FH) != len)
|
|
|
|
WRITE_ERROR_EXIT;
|
|
|
|
|
|
|
|
ctx->filePos += len;
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
static void
|
2002-08-20 19:54:45 +02:00
|
|
|
_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
if (tarRead(buf, len, ctx->FH) != len)
|
|
|
|
/* We already would have exited for errors on reads, must be EOF */
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not read from input file: end of file");
|
2014-05-06 18:12:18 +02:00
|
|
|
|
2014-05-06 02:27:16 +02:00
|
|
|
ctx->filePos += len;
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Finish the archive.
 *
 * In write mode the full archive is assembled here, in this order:
 * the header and TOC ("toc.dat"), the table/LO data members, a
 * human-readable "restore.sql" script (produced by running a throwaway
 * RestoreArchive() with output redirected into the tar member), the
 * two NUL blocks that terminate a tar file, and finally an fsync of the
 * output file if requested.
 */
static void
_CloseArchive(ArchiveHandle *AH)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	TAR_MEMBER *th;
	RestoreOptions *ropt;
	RestoreOptions *savRopt;
	DumpOptions *savDopt;
	int			savVerbose,
				i;

	if (AH->mode == archModeWrite)
	{
		/*
		 * Write the Header & TOC to the archive FIRST
		 */
		th = tarOpen(AH, "toc.dat", 'w');
		ctx->FH = th;
		WriteHead(AH);
		WriteToc(AH);
		tarClose(AH, th);		/* Not needed any more */

		/*
		 * Now send the data (tables & LOs)
		 */
		WriteDataChunks(AH, NULL);

		/*
		 * Now this format wants to append a script which does a full restore
		 * if the files have been extracted.
		 */
		th = tarOpen(AH, "restore.sql", 'w');

		tarPrintf(th, "--\n"
				  "-- NOTE:\n"
				  "--\n"
				  "-- File paths need to be edited. Search for $$PATH$$ and\n"
				  "-- replace it with the path to the directory containing\n"
				  "-- the extracted data files.\n"
				  "--\n");

		/* Redirect all script output into the restore.sql member */
		AH->CustomOutPtr = _scriptOut;

		ctx->isSpecialScript = 1;
		ctx->scriptTH = th;

		/*
		 * Build restore options for the embedded script: no output file,
		 * include DROP commands, no superuser switching, quiet.
		 */
		ropt = NewRestoreOptions();
		memcpy(ropt, AH->public.ropt, sizeof(RestoreOptions));
		ropt->filename = NULL;
		ropt->dropSchema = 1;
		ropt->superuser = NULL;
		ropt->suppressDumpWarnings = true;

		/* Save current options so they can be put back afterwards */
		savDopt = AH->public.dopt;
		savRopt = AH->public.ropt;

		SetArchiveOptions((Archive *) AH, NULL, ropt);

		savVerbose = AH->public.verbose;
		AH->public.verbose = 0;

		/* Generate the script by "restoring" into the tar member */
		RestoreArchive((Archive *) AH);

		SetArchiveOptions((Archive *) AH, savDopt, savRopt);

		AH->public.verbose = savVerbose;

		tarClose(AH, th);

		ctx->isSpecialScript = 0;

		/*
		 * EOF marker for tar files is two blocks of NULLs.
		 */
		for (i = 0; i < TAR_BLOCK_SIZE * 2; i++)
		{
			if (fputc(0, ctx->tarFH) == EOF)
				WRITE_ERROR_EXIT;
		}

		/* Sync the output file if one is defined */
		if (AH->dosync && AH->fSpec)
			(void) fsync_fname(AH->fSpec, false);
	}

	AH->FH = NULL;
}
|
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
static size_t
|
|
|
|
_scriptOut(ArchiveHandle *AH, const void *buf, size_t len)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
return tarWrite(buf, len, ctx->scriptTH);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Large Object support
|
2000-07-21 13:43:26 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when starting to save all BLOB DATA (not schema).
|
|
|
|
* This routine should save whatever format-specific information is needed
|
2022-12-05 08:52:11 +01:00
|
|
|
* to read the LOs back into memory.
|
2000-07-21 13:43:26 +02:00
|
|
|
*
|
|
|
|
* It is called just prior to the dumper's DataDumper routine.
|
|
|
|
*
|
|
|
|
* Optional, but strongly recommended.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_StartLOs(ArchiveHandle *AH, TocEntry *te)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
|
|
|
char fname[K_STD_BUF_SIZE];
|
|
|
|
|
|
|
|
sprintf(fname, "blobs.toc");
|
2022-12-05 08:52:11 +01:00
|
|
|
ctx->loToc = tarOpen(AH, fname, 'w');
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Called by the archiver when the dumper calls StartLO.
|
2000-07-21 13:43:26 +02:00
|
|
|
*
|
|
|
|
* Mandatory.
|
|
|
|
*
|
|
|
|
* Must save the passed OID for retrieval at restore-time.
|
|
|
|
*/
|
|
|
|
/*
 * Called by the archiver when the dumper calls StartLO.
 *
 * Mandatory.
 *
 * Must save the passed OID for retrieval at restore-time: we record the
 * OID and its data-file name in blobs.toc, then open a new tar member
 * "blob_<oid>.dat" to receive the object's data.
 */
static void
_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
	char		fname[255];

	if (oid == 0)
		pg_fatal("invalid OID for large object (%u)", oid);

	/* The tar format has no way to store compressed members */
	if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
		pg_fatal("compression is not supported by tar archive format");

	sprintf(fname, "blob_%u.dat", oid);

	/* Record the OID-to-file mapping in the large-object TOC */
	tarPrintf(ctx->loToc, "%u %s\n", oid, fname);

	/* Subsequent LO data is written through tctx->TH */
	tctx->TH = tarOpen(AH, fname, 'w');
}
|
|
|
|
|
|
|
|
/*
|
2022-12-05 08:52:11 +01:00
|
|
|
* Called by the archiver when the dumper calls EndLO.
|
2000-07-21 13:43:26 +02:00
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
|
|
|
|
|
|
tarClose(AH, tctx->TH);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by the archiver when finishing saving all BLOB DATA.
|
|
|
|
*
|
|
|
|
* Optional.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
2022-12-05 08:52:11 +01:00
|
|
|
_EndLOs(ArchiveHandle *AH, TocEntry *te)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
/* Write out a fake zero OID to mark end-of-LOs. */
|
2000-07-21 13:43:26 +02:00
|
|
|
/* WriteInt(AH, 0); */
|
|
|
|
|
2022-12-05 08:52:11 +01:00
|
|
|
tarClose(AH, ctx->loToc);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*------------
|
|
|
|
* TAR Support
|
|
|
|
*------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * printf-style output into a tar member.
 *
 * Formats into a malloc'd buffer (growing it as pvsnprintf dictates) and
 * then writes the result with tarWrite.  Returns the number of bytes
 * written.  errno is saved and restored around each formatting attempt so
 * that %m expands consistently even after a retry.
 */
static int
tarPrintf(TAR_MEMBER *th, const char *fmt,...)
{
	int			save_errno = errno;
	char	   *p;
	size_t		len = 128;		/* initial assumption about buffer size */
	size_t		cnt;

	for (;;)
	{
		va_list		args;

		/* Allocate work buffer. */
		p = (char *) pg_malloc(len);

		/* Try to format the data. */
		errno = save_errno;		/* in case fmt uses %m */
		va_start(args, fmt);
		cnt = pvsnprintf(p, len, fmt, args);
		va_end(args);

		if (cnt < len)
			break;				/* success */

		/* Release buffer and loop around to try again with larger len. */
		free(p);
		len = cnt;
	}

	cnt = tarWrite(p, cnt, th);
	free(p);
	return (int) cnt;
}
|
|
|
|
|
2003-02-01 20:29:16 +01:00
|
|
|
bool
|
2000-07-21 13:43:26 +02:00
|
|
|
isValidTarHeader(char *header)
|
|
|
|
{
|
|
|
|
int sum;
|
2013-01-01 18:15:57 +01:00
|
|
|
int chk = tarChecksum(header);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2023-08-01 19:50:42 +02:00
|
|
|
sum = read_tar_number(&header[TAR_OFFSET_CHECKSUM], 8);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2003-02-01 20:29:16 +01:00
|
|
|
if (sum != chk)
|
|
|
|
return false;
|
|
|
|
|
2012-09-28 21:19:15 +02:00
|
|
|
/* POSIX tar format */
|
2023-08-01 19:50:42 +02:00
|
|
|
if (memcmp(&header[TAR_OFFSET_MAGIC], "ustar\0", 6) == 0 &&
|
|
|
|
memcmp(&header[TAR_OFFSET_VERSION], "00", 2) == 0)
|
2003-02-01 20:29:16 +01:00
|
|
|
return true;
|
2012-09-28 21:19:15 +02:00
|
|
|
/* GNU tar format */
|
2023-08-01 19:50:42 +02:00
|
|
|
if (memcmp(&header[TAR_OFFSET_MAGIC], "ustar \0", 8) == 0)
|
2012-09-28 21:19:15 +02:00
|
|
|
return true;
|
|
|
|
/* not-quite-POSIX format written by pre-9.3 pg_dump */
|
2023-08-01 19:50:42 +02:00
|
|
|
if (memcmp(&header[TAR_OFFSET_MAGIC], "ustar00\0", 8) == 0)
|
2003-02-01 20:29:16 +01:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Given the member, write the TAR header & copy the file */
|
|
|
|
/* Given the member, write the TAR header & copy the file */
/*
 * The member's data was spooled to a temporary file (th->tmpFH); measure
 * it, emit the tar header, copy the data into the archive, then pad to a
 * TAR_BLOCK_SIZE boundary.  The temp file is closed (and thereby deleted,
 * since it was created with tmpfile()-like semantics -- verify) on the way.
 */
static void
_tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	FILE	   *tmp = th->tmpFH;	/* Grab it for convenience */
	char		buf[32768];
	size_t		cnt;
	pgoff_t		len = 0;
	size_t		res;
	size_t		i,
				pad;

	/*
	 * Find file len & go back to start.
	 */
	if (fseeko(tmp, 0, SEEK_END) != 0)
		pg_fatal("error during file seek: %m");
	th->fileLen = ftello(tmp);
	if (th->fileLen < 0)
		pg_fatal("could not determine seek position in archive file: %m");
	if (fseeko(tmp, 0, SEEK_SET) != 0)
		pg_fatal("error during file seek: %m");

	/* The header must go out before the data so the length is recorded */
	_tarWriteHeader(th);

	/* Copy the spooled data into the archive, counting bytes as we go */
	while ((cnt = fread(buf, 1, sizeof(buf), tmp)) > 0)
	{
		if ((res = fwrite(buf, 1, cnt, th->tarFH)) != cnt)
			WRITE_ERROR_EXIT;
		len += res;
	}
	if (!feof(tmp))
		READ_ERROR_EXIT(tmp);

	if (fclose(tmp) != 0)		/* This *should* delete it... */
		pg_fatal("could not close temporary file: %m");

	/* Cross-check that we copied exactly what the header promised */
	if (len != th->fileLen)
		pg_fatal("actual file length (%lld) does not match expected (%lld)",
				 (long long) len, (long long) th->fileLen);

	/* Pad the member out to a full tar block */
	pad = tarPaddingBytesRequired(len);
	for (i = 0; i < pad; i++)
	{
		if (fputc('\0', th->tarFH) == EOF)
			WRITE_ERROR_EXIT;
	}

	ctx->tarFHpos += len + pad;
}
|
|
|
|
|
|
|
|
/* Locate the file in the archive, read header and position to data */
|
|
|
|
static TAR_MEMBER *
|
|
|
|
_tarPositionTo(ArchiveHandle *AH, const char *filename)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2022-09-12 08:31:56 +02:00
|
|
|
TAR_MEMBER *th = pg_malloc0_object(TAR_MEMBER);
|
2000-07-21 13:43:26 +02:00
|
|
|
char c;
|
2020-04-24 16:38:10 +02:00
|
|
|
char header[TAR_BLOCK_SIZE];
|
2002-08-20 19:54:45 +02:00
|
|
|
size_t i,
|
2000-07-21 13:43:26 +02:00
|
|
|
len,
|
2002-08-20 19:54:45 +02:00
|
|
|
blks;
|
|
|
|
int id;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
th->AH = AH;
|
|
|
|
|
|
|
|
/* Go to end of current file, if any */
|
|
|
|
if (ctx->tarFHpos != 0)
|
|
|
|
{
|
2022-03-18 18:10:04 +01:00
|
|
|
pg_log_debug("moving from position %lld to next member at file position %lld",
|
|
|
|
(long long) ctx->tarFHpos, (long long) ctx->tarNextMember);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
while (ctx->tarFHpos < ctx->tarNextMember)
|
|
|
|
_tarReadRaw(AH, &c, 1, NULL, ctx->tarFH);
|
|
|
|
}
|
|
|
|
|
2022-03-18 18:10:04 +01:00
|
|
|
pg_log_debug("now at file position %lld", (long long) ctx->tarFHpos);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2010-02-23 17:55:22 +01:00
|
|
|
/* We are at the start of the file, or at the next member */
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
/* Get the header */
|
|
|
|
if (!_tarGetHeader(AH, th))
|
|
|
|
{
|
|
|
|
if (filename)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not find header for file \"%s\" in tar archive", filename);
|
2000-07-21 13:43:26 +02:00
|
|
|
else
|
2010-02-23 17:55:22 +01:00
|
|
|
{
|
2004-10-07 17:21:58 +02:00
|
|
|
/*
|
2010-02-23 17:55:22 +01:00
|
|
|
* We're just scanning the archive for the next file, so return
|
2004-10-07 17:21:58 +02:00
|
|
|
* null
|
|
|
|
*/
|
2000-07-21 13:43:26 +02:00
|
|
|
free(th);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
while (filename != NULL && strcmp(th->targetFile, filename) != 0)
|
|
|
|
{
|
Unified logging system for command-line programs
This unifies the various ad hoc logging (message printing, error
printing) systems used throughout the command-line programs.
Features:
- Program name is automatically prefixed.
- Message string does not end with newline. This removes a common
source of inconsistencies and omissions.
- Additionally, a final newline is automatically stripped, simplifying
use of PQerrorMessage() etc., another common source of mistakes.
- I converted error message strings to use %m where possible.
- As a result of the above several points, more translatable message
strings can be shared between different components and between
frontends and backend, without gratuitous punctuation or whitespace
differences.
- There is support for setting a "log level". This is not meant to be
user-facing, but can be used internally to implement debug or
verbose modes.
- Lazy argument evaluation, so no significant overhead if logging at
some level is disabled.
- Some color in the messages, similar to gcc and clang. Set
PG_COLOR=auto to try it out. Some colors are predefined, but can be
customized by setting PG_COLORS.
- Common files (common/, fe_utils/, etc.) can handle logging much more
simply by just using one API without worrying too much about the
context of the calling program, requiring callbacks, or having to
pass "progname" around everywhere.
- Some programs called setvbuf() to make sure that stderr is
unbuffered, even on Windows. But not all programs did that. This
is now done centrally.
Soft goals:
- Reduces vertical space use and visual complexity of error reporting
in the source code.
- Encourages more deliberate classification of messages. For example,
in some cases it wasn't clear without analyzing the surrounding code
whether a message was meant as an error or just an info.
- Concepts and terms are vaguely aligned with popular logging
frameworks such as log4j and Python logging.
This is all just about printing stuff out. Nothing affects program
flow (e.g., fatal exits). The uses are just too varied to do that.
Some existing code had wrappers that do some kind of print-and-exit,
and I adapted those.
I tried to keep the output mostly the same, but there is a lot of
historical baggage to unwind and special cases to consider, and I
might not always have succeeded. One significant change is that
pg_rewind used to write all error messages to stdout. That is now
changed to stderr.
Reviewed-by: Donald Dong <xdong@csumb.edu>
Reviewed-by: Arthur Zakirov <a.zakirov@postgrespro.ru>
Discussion: https://www.postgresql.org/message-id/flat/6a609b43-4f57-7348-6480-bd022f924310@2ndquadrant.com
2019-04-01 14:24:37 +02:00
|
|
|
pg_log_debug("skipping tar member %s", th->targetFile);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
id = atoi(th->targetFile);
|
Rewrite --section option to decouple it from --schema-only/--data-only.
The initial implementation of pg_dump's --section option supposed that the
existing --schema-only and --data-only options could be made equivalent to
--section settings. This is wrong, though, due to dubious but long since
set-in-stone decisions about where to dump SEQUENCE SET items, as seen in
bug report from Martin Pitt. (And I'm not totally convinced there weren't
other bugs, either.) Undo that coupling and instead drive --section
filtering off current-section state tracked as we scan through the TOC
list to call _tocEntryRequired().
To make sure those decisions don't shift around and hopefully save a few
cycles, run _tocEntryRequired() only once per TOC entry and save the result
in a new TOC field. This required minor rejiggering of ACL handling but
also allows a far cleaner implementation of inhibit_data_for_failed_table.
Also, to ensure that pg_dump and pg_restore have the same behavior with
respect to the --section switches, add _tocEntryRequired() filtering to
WriteToc() and WriteDataChunks(), rather than trying to implement section
filtering in an entirely orthogonal way in dumpDumpableObject(). This
required adjusting the handling of the special ENCODING and STDSTRINGS
items, but they were pretty weird before anyway.
Minor other code review for the patch, too.
2012-05-30 05:22:14 +02:00
|
|
|
if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("restoring data out of order is not supported in this archive format: "
|
|
|
|
"\"%s\" is required, but comes before \"%s\" in the archive file.",
|
|
|
|
th->targetFile, filename);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
/* Header doesn't match, so read to next header */
|
2020-04-24 16:38:10 +02:00
|
|
|
len = th->fileLen;
|
|
|
|
len += tarPaddingBytesRequired(th->fileLen);
|
|
|
|
blks = len / TAR_BLOCK_SIZE; /* # of tar blocks */
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
for (i = 0; i < blks; i++)
|
2020-04-24 16:38:10 +02:00
|
|
|
_tarReadRaw(AH, &header[0], TAR_BLOCK_SIZE, NULL, ctx->tarFH);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
if (!_tarGetHeader(AH, th))
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("could not find header for file \"%s\" in tar archive", filename);
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
2020-04-24 16:38:10 +02:00
|
|
|
ctx->tarNextMember = ctx->tarFHpos + th->fileLen
|
|
|
|
+ tarPaddingBytesRequired(th->fileLen);
|
2000-07-21 13:43:26 +02:00
|
|
|
th->pos = 0;
|
|
|
|
|
|
|
|
return th;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Read & verify a header */
|
|
|
|
static int
|
|
|
|
_tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
|
|
|
|
{
|
|
|
|
lclContext *ctx = (lclContext *) AH->formatData;
|
2020-04-24 16:38:10 +02:00
|
|
|
char h[TAR_BLOCK_SIZE];
|
Adopt the GNU convention for handling tar-archive members exceeding 8GB.
The POSIX standard for tar headers requires archive member sizes to be
printed in octal with at most 11 digits, limiting the representable file
size to 8GB. However, GNU tar and apparently most other modern tars
support a convention in which oversized values can be stored in base-256,
allowing any practical file to be a tar member. Adopt this convention
to remove two limitations:
* pg_dump with -Ft output format failed if the contents of any one table
exceeded 8GB.
* pg_basebackup failed if the data directory contained any file exceeding
8GB. (This would be a fatal problem for installations configured with a
table segment size of 8GB or more, and it has also been seen to fail when
large core dump files exist in the data directory.)
File sizes under 8GB are still printed in octal, so that no compatibility
issues are created except in cases that would have failed entirely before.
In addition, this patch fixes several bugs in the same area:
* In 9.3 and later, we'd defined tarCreateHeader's file-size argument as
size_t, which meant that on 32-bit machines it would write a corrupt tar
header for file sizes between 4GB and 8GB, even though no error was raised.
This broke both "pg_dump -Ft" and pg_basebackup for such cases.
* pg_restore from a tar archive would fail on tables of size between 4GB
and 8GB, on machines where either "size_t" or "unsigned long" is 32 bits.
This happened even with an archive file not affected by the previous bug.
* pg_basebackup would fail if there were files of size between 4GB and 8GB,
even on 64-bit machines.
* In 9.3 and later, "pg_basebackup -Ft" failed entirely, for any file size,
on 64-bit big-endian machines.
In view of these potential data-loss bugs, back-patch to all supported
branches, even though removal of the documented 8GB limit might otherwise
be considered a new feature rather than a bug fix.
2015-11-22 02:21:31 +01:00
|
|
|
char tag[100 + 1];
|
2000-07-21 13:43:26 +02:00
|
|
|
int sum,
|
|
|
|
chk;
|
Adopt the GNU convention for handling tar-archive members exceeding 8GB.
The POSIX standard for tar headers requires archive member sizes to be
printed in octal with at most 11 digits, limiting the representable file
size to 8GB. However, GNU tar and apparently most other modern tars
support a convention in which oversized values can be stored in base-256,
allowing any practical file to be a tar member. Adopt this convention
to remove two limitations:
* pg_dump with -Ft output format failed if the contents of any one table
exceeded 8GB.
* pg_basebackup failed if the data directory contained any file exceeding
8GB. (This would be a fatal problem for installations configured with a
table segment size of 8GB or more, and it has also been seen to fail when
large core dump files exist in the data directory.)
File sizes under 8GB are still printed in octal, so that no compatibility
issues are created except in cases that would have failed entirely before.
In addition, this patch fixes several bugs in the same area:
* In 9.3 and later, we'd defined tarCreateHeader's file-size argument as
size_t, which meant that on 32-bit machines it would write a corrupt tar
header for file sizes between 4GB and 8GB, even though no error was raised.
This broke both "pg_dump -Ft" and pg_basebackup for such cases.
* pg_restore from a tar archive would fail on tables of size between 4GB
and 8GB, on machines where either "size_t" or "unsigned long" is 32 bits.
This happened even with an archive file not affected by the previous bug.
* pg_basebackup would fail if there were files of size between 4GB and 8GB,
even on 64-bit machines.
* In 9.3 and later, "pg_basebackup -Ft" failed entirely, for any file size,
on 64-bit big-endian machines.
In view of these potential data-loss bugs, back-patch to all supported
branches, even though removal of the documented 8GB limit might otherwise
be considered a new feature rather than a bug fix.
2015-11-22 02:21:31 +01:00
|
|
|
pgoff_t len;
|
2007-02-19 16:05:06 +01:00
|
|
|
pgoff_t hPos;
|
2001-02-13 02:31:54 +01:00
|
|
|
bool gotBlock = false;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2001-02-13 02:31:54 +01:00
|
|
|
while (!gotBlock)
|
|
|
|
{
|
|
|
|
/* Save the pos for reporting purposes */
|
|
|
|
hPos = ctx->tarFHpos;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2020-04-24 16:38:10 +02:00
|
|
|
/* Read the next tar block, return EOF, exit if short */
|
|
|
|
len = _tarReadRaw(AH, h, TAR_BLOCK_SIZE, NULL, ctx->tarFH);
|
2001-02-13 02:31:54 +01:00
|
|
|
if (len == 0) /* EOF */
|
|
|
|
return 0;
|
|
|
|
|
2020-04-24 16:38:10 +02:00
|
|
|
if (len != TAR_BLOCK_SIZE)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal(ngettext("incomplete tar header found (%lu byte)",
|
|
|
|
"incomplete tar header found (%lu bytes)",
|
|
|
|
len),
|
|
|
|
(unsigned long) len);
|
2001-02-13 02:31:54 +01:00
|
|
|
|
|
|
|
/* Calc checksum */
|
2013-01-01 18:15:57 +01:00
|
|
|
chk = tarChecksum(h);
|
2023-08-01 19:50:42 +02:00
|
|
|
sum = read_tar_number(&h[TAR_OFFSET_CHECKSUM], 8);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2001-02-13 02:31:54 +01:00
|
|
|
/*
|
2005-06-22 04:00:47 +02:00
|
|
|
* If the checksum failed, see if it is a null block. If so, silently
|
|
|
|
* continue to the next block.
|
2001-02-13 02:31:54 +01:00
|
|
|
*/
|
|
|
|
if (chk == sum)
|
|
|
|
gotBlock = true;
|
|
|
|
else
|
|
|
|
{
|
2005-06-22 04:00:47 +02:00
|
|
|
int i;
|
|
|
|
|
2020-04-24 16:38:10 +02:00
|
|
|
for (i = 0; i < TAR_BLOCK_SIZE; i++)
|
2001-02-13 02:31:54 +01:00
|
|
|
{
|
2005-06-22 04:00:47 +02:00
|
|
|
if (h[i] != 0)
|
2001-02-13 02:31:54 +01:00
|
|
|
{
|
|
|
|
gotBlock = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2000-07-21 13:43:26 +02:00
|
|
|
|
Adopt the GNU convention for handling tar-archive members exceeding 8GB.
The POSIX standard for tar headers requires archive member sizes to be
printed in octal with at most 11 digits, limiting the representable file
size to 8GB. However, GNU tar and apparently most other modern tars
support a convention in which oversized values can be stored in base-256,
allowing any practical file to be a tar member. Adopt this convention
to remove two limitations:
* pg_dump with -Ft output format failed if the contents of any one table
exceeded 8GB.
* pg_basebackup failed if the data directory contained any file exceeding
8GB. (This would be a fatal problem for installations configured with a
table segment size of 8GB or more, and it has also been seen to fail when
large core dump files exist in the data directory.)
File sizes under 8GB are still printed in octal, so that no compatibility
issues are created except in cases that would have failed entirely before.
In addition, this patch fixes several bugs in the same area:
* In 9.3 and later, we'd defined tarCreateHeader's file-size argument as
size_t, which meant that on 32-bit machines it would write a corrupt tar
header for file sizes between 4GB and 8GB, even though no error was raised.
This broke both "pg_dump -Ft" and pg_basebackup for such cases.
* pg_restore from a tar archive would fail on tables of size between 4GB
and 8GB, on machines where either "size_t" or "unsigned long" is 32 bits.
This happened even with an archive file not affected by the previous bug.
* pg_basebackup would fail if there were files of size between 4GB and 8GB,
even on 64-bit machines.
* In 9.3 and later, "pg_basebackup -Ft" failed entirely, for any file size,
on 64-bit big-endian machines.
In view of these potential data-loss bugs, back-patch to all supported
branches, even though removal of the documented 8GB limit might otherwise
be considered a new feature rather than a bug fix.
2015-11-22 02:21:31 +01:00
|
|
|
/* Name field is 100 bytes, might not be null-terminated */
|
2023-08-01 19:50:42 +02:00
|
|
|
strlcpy(tag, &h[TAR_OFFSET_NAME], 100 + 1);
|
Adopt the GNU convention for handling tar-archive members exceeding 8GB.
The POSIX standard for tar headers requires archive member sizes to be
printed in octal with at most 11 digits, limiting the representable file
size to 8GB. However, GNU tar and apparently most other modern tars
support a convention in which oversized values can be stored in base-256,
allowing any practical file to be a tar member. Adopt this convention
to remove two limitations:
* pg_dump with -Ft output format failed if the contents of any one table
exceeded 8GB.
* pg_basebackup failed if the data directory contained any file exceeding
8GB. (This would be a fatal problem for installations configured with a
table segment size of 8GB or more, and it has also been seen to fail when
large core dump files exist in the data directory.)
File sizes under 8GB are still printed in octal, so that no compatibility
issues are created except in cases that would have failed entirely before.
In addition, this patch fixes several bugs in the same area:
* In 9.3 and later, we'd defined tarCreateHeader's file-size argument as
size_t, which meant that on 32-bit machines it would write a corrupt tar
header for file sizes between 4GB and 8GB, even though no error was raised.
This broke both "pg_dump -Ft" and pg_basebackup for such cases.
* pg_restore from a tar archive would fail on tables of size between 4GB
and 8GB, on machines where either "size_t" or "unsigned long" is 32 bits.
This happened even with an archive file not affected by the previous bug.
* pg_basebackup would fail if there were files of size between 4GB and 8GB,
even on 64-bit machines.
* In 9.3 and later, "pg_basebackup -Ft" failed entirely, for any file size,
on 64-bit big-endian machines.
In view of these potential data-loss bugs, back-patch to all supported
branches, even though removal of the documented 8GB limit might otherwise
be considered a new feature rather than a bug fix.
2015-11-22 02:21:31 +01:00
|
|
|
|
2023-08-01 19:50:42 +02:00
|
|
|
len = read_tar_number(&h[TAR_OFFSET_SIZE], 12);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2022-03-18 18:10:04 +01:00
|
|
|
pg_log_debug("TOC Entry %s at %llu (length %llu, checksum %d)",
|
|
|
|
tag, (unsigned long long) hPos, (unsigned long long) len, sum);
|
2000-07-21 13:43:26 +02:00
|
|
|
|
|
|
|
if (chk != sum)
|
2022-04-08 20:55:14 +02:00
|
|
|
pg_fatal("corrupt tar header found in %s (expected %d, computed %d) file position %llu",
|
|
|
|
tag, sum, chk, (unsigned long long) ftello(ctx->tarFH));
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2011-11-25 21:40:51 +01:00
|
|
|
th->targetFile = pg_strdup(tag);
|
2000-07-21 13:43:26 +02:00
|
|
|
th->fileLen = len;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2002-09-06 23:58:36 +02:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
static void
|
|
|
|
_tarWriteHeader(TAR_MEMBER *th)
|
|
|
|
{
|
2020-04-24 16:38:10 +02:00
|
|
|
char h[TAR_BLOCK_SIZE];
|
2000-07-21 13:43:26 +02:00
|
|
|
|
Adopt the GNU convention for handling tar-archive members exceeding 8GB.
The POSIX standard for tar headers requires archive member sizes to be
printed in octal with at most 11 digits, limiting the representable file
size to 8GB. However, GNU tar and apparently most other modern tars
support a convention in which oversized values can be stored in base-256,
allowing any practical file to be a tar member. Adopt this convention
to remove two limitations:
* pg_dump with -Ft output format failed if the contents of any one table
exceeded 8GB.
* pg_basebackup failed if the data directory contained any file exceeding
8GB. (This would be a fatal problem for installations configured with a
table segment size of 8GB or more, and it has also been seen to fail when
large core dump files exist in the data directory.)
File sizes under 8GB are still printed in octal, so that no compatibility
issues are created except in cases that would have failed entirely before.
In addition, this patch fixes several bugs in the same area:
* In 9.3 and later, we'd defined tarCreateHeader's file-size argument as
size_t, which meant that on 32-bit machines it would write a corrupt tar
header for file sizes between 4GB and 8GB, even though no error was raised.
This broke both "pg_dump -Ft" and pg_basebackup for such cases.
* pg_restore from a tar archive would fail on tables of size between 4GB
and 8GB, on machines where either "size_t" or "unsigned long" is 32 bits.
This happened even with an archive file not affected by the previous bug.
* pg_basebackup would fail if there were files of size between 4GB and 8GB,
even on 64-bit machines.
* In 9.3 and later, "pg_basebackup -Ft" failed entirely, for any file size,
on 64-bit big-endian machines.
In view of these potential data-loss bugs, back-patch to all supported
branches, even though removal of the documented 8GB limit might otherwise
be considered a new feature rather than a bug fix.
2015-11-22 02:21:31 +01:00
|
|
|
tarCreateHeader(h, th->targetFile, NULL, th->fileLen,
|
|
|
|
0600, 04000, 02000, time(NULL));
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2012-09-28 21:19:15 +02:00
|
|
|
/* Now write the completed header. */
|
2020-04-24 16:38:10 +02:00
|
|
|
if (fwrite(h, 1, TAR_BLOCK_SIZE, th->tarFH) != TAR_BLOCK_SIZE)
|
2014-05-06 02:27:16 +02:00
|
|
|
WRITE_ERROR_EXIT;
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|