/*-------------------------------------------------------------------------
*
* pg_backup_tar.c
*
* This file is copied from the 'files' format file, but dumps data into
* one temp file then sends it to the output TAR archive.
*
* The tar format also includes a 'restore.sql' script which is there for
* the benefit of humans. This script is never used by pg_restore.
*
* NOTE: If you untar the created 'tar' file, the resulting files are
* compatible with the 'directory' format. Please keep the two formats in
* sync.
*
* See the headers to pg_backup_directory & pg_restore for more details.
*
* Copyright (c) 2000, Philip Warner
* Rights are granted to use this software in any way so long
* as this notice is not removed.
*
* The author is not responsible for loss or damages that may
* result from its use.
*
*
* IDENTIFICATION
* src/bin/pg_dump/pg_backup_tar.c
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <sys/stat.h>
#include <ctype.h>
#include <limits.h>
#include <unistd.h>
#include "common/file_utils.h"
#include "fe_utils/string_utils.h"
#include "pg_backup_archiver.h"
#include "pg_backup_tar.h"
#include "pg_backup_utils.h"
#include "pgtar.h"
static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
static void _StartData(ArchiveHandle *AH, TocEntry *te);
static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
static void _EndData(ArchiveHandle *AH, TocEntry *te);
static int _WriteByte(ArchiveHandle *AH, const int i);
static int _ReadByte(ArchiveHandle *AH);
static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
static void _CloseArchive(ArchiveHandle *AH);
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te);
static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
static void _StartLOs(ArchiveHandle *AH, TocEntry *te);
static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid);
static void _EndLOs(ArchiveHandle *AH, TocEntry *te);
#define K_STD_BUF_SIZE 1024
typedef struct
{
    FILE       *nFH;            /* file handle used for member data I/O */
    FILE       *tarFH;          /* underlying tar archive file */
    FILE       *tmpFH;          /* temp file holding the data while writing */
    char       *targetFile;     /* member name within the archive */
    char        mode;           /* 'r' or 'w' */
    pgoff_t     pos;            /* current offset within the member's data */
    pgoff_t     fileLen;        /* total length of the member's data */
    ArchiveHandle *AH;
} TAR_MEMBER;

typedef struct
{
    int         hasSeek;        /* is the archive file seekable? */
    pgoff_t     filePos;        /* position within the current data stream */
    TAR_MEMBER *loToc;          /* "blobs.toc" member, while dumping LOs */
    FILE       *tarFH;          /* the tar archive file itself */
    pgoff_t     tarFHpos;       /* current read offset within the archive */
    pgoff_t     tarNextMember;  /* offset of the next member's header */
    TAR_MEMBER *FH;             /* currently open member */
    int         isSpecialScript;    /* nonzero while emitting restore.sql */
    TAR_MEMBER *scriptTH;       /* member handle for restore.sql */
} lclContext;

typedef struct
{
    TAR_MEMBER *TH;             /* handle for this entry's data member */
    char       *filename;       /* data member name, or NULL if no data */
} lclTocEntry;
static void _LoadLOs(ArchiveHandle *AH);
static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode);
static void tarClose(ArchiveHandle *AH, TAR_MEMBER *th);
#ifdef __NOT_USED__
static char *tarGets(char *buf, size_t len, TAR_MEMBER *th);
#endif
static int tarPrintf(TAR_MEMBER *th, const char *fmt,...) pg_attribute_printf(2, 3);
static void _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th);
static TAR_MEMBER *_tarPositionTo(ArchiveHandle *AH, const char *filename);
static size_t tarRead(void *buf, size_t len, TAR_MEMBER *th);
static size_t tarWrite(const void *buf, size_t len, TAR_MEMBER *th);
static void _tarWriteHeader(TAR_MEMBER *th);
static int _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th);
static size_t _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh);
static size_t _scriptOut(ArchiveHandle *AH, const void *buf, size_t len);
/*
* Initializer
*/
void
InitArchiveFmt_Tar(ArchiveHandle *AH)
{
lclContext *ctx;
/* Assuming static functions, this can be copied for each format. */
AH->ArchiveEntryPtr = _ArchiveEntry;
AH->StartDataPtr = _StartData;
AH->WriteDataPtr = _WriteData;
AH->EndDataPtr = _EndData;
AH->WriteBytePtr = _WriteByte;
AH->ReadBytePtr = _ReadByte;
AH->WriteBufPtr = _WriteBuf;
AH->ReadBufPtr = _ReadBuf;
AH->ClosePtr = _CloseArchive;
AH->ReopenPtr = NULL;
AH->PrintTocDataPtr = _PrintTocData;
AH->ReadExtraTocPtr = _ReadExtraToc;
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
AH->StartLOsPtr = _StartLOs;
AH->StartLOPtr = _StartLO;
AH->EndLOPtr = _EndLO;
AH->EndLOsPtr = _EndLOs;
AH->ClonePtr = NULL;
AH->DeClonePtr = NULL;
AH->WorkerJobDumpPtr = NULL;
AH->WorkerJobRestorePtr = NULL;
/*
* Set up some special context used in compressing data.
*/
ctx = pg_malloc0_object(lclContext);
AH->formatData = (void *) ctx;
ctx->filePos = 0;
ctx->isSpecialScript = 0;
/* Initialize LO buffering */
AH->lo_buf_size = LOBBUFSIZE;
AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
/*
* Now open the tar file, and load the TOC if we're in read mode.
*/
if (AH->mode == archModeWrite)
{
if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
{
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
if (ctx->tarFH == NULL)
pg_fatal("could not open TOC file \"%s\" for output: %m",
AH->fSpec);
}
else
{
ctx->tarFH = stdout;
if (ctx->tarFH == NULL)
pg_fatal("could not open TOC file for output: %m");
}
ctx->tarFHpos = 0;
/*
* Make unbuffered since we will dup() it, and the buffers screw each
* other
*/
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
ctx->hasSeek = checkSeek(ctx->tarFH);
/*
* We don't support compression because reading the files back is not
* possible since gzdopen uses buffered IO which totally screws file
* positioning.
*/
if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
pg_fatal("compression is not supported by tar archive format");
}
else
{ /* Read Mode */
if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
{
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_R);
if (ctx->tarFH == NULL)
pg_fatal("could not open TOC file \"%s\" for input: %m",
AH->fSpec);
}
else
{
ctx->tarFH = stdin;
if (ctx->tarFH == NULL)
pg_fatal("could not open TOC file for input: %m");
}
/*
* Make unbuffered since we will dup() it, and the buffers screw each
* other
*/
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
ctx->tarFHpos = 0;
ctx->hasSeek = checkSeek(ctx->tarFH);
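
        /*
         * The TOC lives in the member "toc.dat"; it is the first member
         * written (see _CloseArchive), so a forward scan finds it at once.
         */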
ctx->FH = (void *) tarOpen(AH, "toc.dat", 'r');
ReadHead(AH);
ReadToc(AH);
tarClose(AH, ctx->FH); /* Nothing else in the file... */
}
}
/*
* - Start a new TOC entry
* Setup the output file name.
*/
static void
_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
{
lclTocEntry *ctx;
char fn[K_STD_BUF_SIZE];
ctx = pg_malloc0_object(lclTocEntry);
if (te->dataDumper != NULL)
{
snprintf(fn, sizeof(fn), "%d.dat", te->dumpId);
ctx->filename = pg_strdup(fn);
}
else
{
ctx->filename = NULL;
ctx->TH = NULL;
}
te->formatData = (void *) ctx;
}
static void
_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
{
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
if (ctx->filename)
WriteStr(AH, ctx->filename);
else
WriteStr(AH, "");
}
static void
_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
{
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
if (ctx == NULL)
{
ctx = pg_malloc0_object(lclTocEntry);
te->formatData = (void *) ctx;
}
ctx->filename = ReadStr(AH);
if (strlen(ctx->filename) == 0)
{
free(ctx->filename);
ctx->filename = NULL;
}
ctx->TH = NULL;
}
static void
_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
{
lclTocEntry *ctx = (lclTocEntry *) te->formatData;
if (AH->public.verbose && ctx->filename != NULL)
ahprintf(AH, "-- File: %s\n", ctx->filename);
}
static void
_StartData(ArchiveHandle *AH, TocEntry *te)
{
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
tctx->TH = tarOpen(AH, tctx->filename, 'w');
}
static TAR_MEMBER *
tarOpen(ArchiveHandle *AH, const char *filename, char mode)
{
lclContext *ctx = (lclContext *) AH->formatData;
TAR_MEMBER *tm;
if (mode == 'r')
{
tm = _tarPositionTo(AH, filename);
if (!tm) /* Not found */
{
if (filename)
{
/*
* Couldn't find the requested file. Future: do SEEK(0) and
* retry.
*/
pg_fatal("could not find file \"%s\" in archive", filename);
}
else
{
/* Any file OK, none left, so return NULL */
return NULL;
}
}
if (AH->compression_spec.algorithm == PG_COMPRESSION_NONE)
tm->nFH = ctx->tarFH;
else
pg_fatal("compression is not supported by tar archive format");
}
else
{
int old_umask;
tm = pg_malloc0_object(TAR_MEMBER);
/*
* POSIX does not require, but permits, tmpfile() to restrict file
* permissions. Given an OS crash after we write data, the filesystem
* might retain the data but forget tmpfile()'s unlink(). If so, the
* file mode protects confidentiality of the data written.
*/
old_umask = umask(S_IRWXG | S_IRWXO);
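    /* With group and other bits masked, the temp file is at most mode 0600. */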
#ifndef WIN32
tm->tmpFH = tmpfile();
#else
/*
* On WIN32, tmpfile() generates a filename in the root directory,
* which requires administrative permissions on certain systems. Loop
* until we find a unique file name we can create.
*/
while (1)
{
char *name;
int fd;
name = _tempnam(NULL, "pg_temp_");
if (name == NULL)
break;
fd = open(name, O_RDWR | O_CREAT | O_EXCL | O_BINARY |
O_TEMPORARY, S_IRUSR | S_IWUSR);
free(name);
if (fd != -1) /* created a file */
{
tm->tmpFH = fdopen(fd, "w+b");
break;
}
else if (errno != EEXIST) /* failure other than file exists */
break;
}
#endif
if (tm->tmpFH == NULL)
pg_fatal("could not generate temporary file name: %m");
umask(old_umask);
if (AH->compression_spec.algorithm == PG_COMPRESSION_NONE)
tm->nFH = tm->tmpFH;
else
pg_fatal("compression is not supported by tar archive format");
tm->AH = AH;
tm->targetFile = pg_strdup(filename);
}
tm->mode = mode;
tm->tarFH = ctx->tarFH;
return tm;
}
static void
tarClose(ArchiveHandle *AH, TAR_MEMBER *th)
{
if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
pg_fatal("compression is not supported by tar archive format");
if (th->mode == 'w')
_tarAddFile(AH, th); /* This will close the temp file */
/*
* else Nothing to do for normal read since we don't dup() normal file
* handle, and we don't use temp files.
*/
free(th->targetFile);
th->nFH = NULL;
}
#ifdef __NOT_USED__
static char *
tarGets(char *buf, size_t len, TAR_MEMBER *th)
{
char *s;
size_t cnt = 0;
char c = ' ';
int eof = 0;
/* Can't read past logical EOF */
if (len > (th->fileLen - th->pos))
len = th->fileLen - th->pos;
while (cnt < len && c != '\n')
{
if (_tarReadRaw(th->AH, &c, 1, th, NULL) <= 0)
{
eof = 1;
break;
}
buf[cnt++] = c;
}
if (eof && cnt == 0)
s = NULL;
else
{
buf[cnt++] = '\0';
s = buf;
}
if (s)
{
len = strlen(s);
th->pos += len;
}
return s;
}
#endif
/*
* Just read bytes from the archive. This is the low level read routine
* that is used for ALL reads on a tar file.
*/
static size_t
_tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
{
lclContext *ctx = (lclContext *) AH->formatData;
size_t avail;
size_t used = 0;
size_t res = 0;
Assert(th || fh);
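
    /*
     * Satisfy the request first from AH->lookahead, which holds bytes that
     * were already consumed from the input while sniffing the archive
     * format, then read the rest from the underlying file.
     */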
avail = AH->lookaheadLen - AH->lookaheadPos;
if (avail > 0)
{
/* We have some lookahead bytes to use */
if (avail >= len) /* Just use the lookahead buffer */
used = len;
else
used = avail;
/* Copy, and adjust buffer pos */
memcpy(buf, AH->lookahead + AH->lookaheadPos, used);
AH->lookaheadPos += used;
/* Adjust required length */
len -= used;
}
/* Read the file if len > 0 */
if (len > 0)
{
if (fh)
{
res = fread(&((char *) buf)[used], 1, len, fh);
if (res != len && !feof(fh))
READ_ERROR_EXIT(fh);
}
else if (th)
{
res = fread(&((char *) buf)[used], 1, len, th->nFH);
if (res != len && !feof(th->nFH))
READ_ERROR_EXIT(th->nFH);
}
}
ctx->tarFHpos += res + used;
return (res + used);
}
static size_t
tarRead(void *buf, size_t len, TAR_MEMBER *th)
{
size_t res;
if (th->pos + len > th->fileLen)
len = th->fileLen - th->pos;
if (len <= 0)
return 0;
res = _tarReadRaw(th->AH, buf, len, th, NULL);
th->pos += res;
return res;
}
static size_t
tarWrite(const void *buf, size_t len, TAR_MEMBER *th)
{
size_t res;
res = fwrite(buf, 1, len, th->nFH);
th->pos += res;
return res;
}
static void
_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
{
lclTocEntry *tctx = (lclTocEntry *) AH->currToc->formatData;
if (tarWrite(data, dLen, tctx->TH) != dLen)
WRITE_ERROR_EXIT;
}
static void
_EndData(ArchiveHandle *AH, TocEntry *te)
{
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
/* Close the file */
tarClose(AH, tctx->TH);
tctx->TH = NULL;
}
/*
* Print data for a given file
*/
static void
_PrintFileData(ArchiveHandle *AH, char *filename)
{
lclContext *ctx = (lclContext *) AH->formatData;
char buf[4096];
size_t cnt;
TAR_MEMBER *th;
if (!filename)
return;
th = tarOpen(AH, filename, 'r');
ctx->FH = th;
while ((cnt = tarRead(buf, 4095, th)) > 0)
{
buf[cnt] = '\0';
ahwrite(buf, 1, cnt, AH);
}
tarClose(AH, th);
}
/*
* Print data for a given TOC entry
*/
static void
_PrintTocData(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
int pos1;
if (!tctx->filename)
return;
/*
* If we're writing the special restore.sql script, emit a suitable
* command to include each table's data from the corresponding file.
*
* In the COPY case this is a bit klugy because the regular COPY command
* was already printed before we get control.
*/
if (ctx->isSpecialScript)
{
if (te->copyStmt)
{
/* Abort the COPY FROM stdin */
ahprintf(AH, "\\.\n");
/*
* The COPY statement should look like "COPY ... FROM stdin;\n",
* see dumpTableData().
*/
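            /* 13 == strlen(" FROM stdin;\n"), the tail verified just below */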
pos1 = (int) strlen(te->copyStmt) - 13;
if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
pg_fatal("unexpected COPY statement syntax: \"%s\"",
te->copyStmt);
/* Emit all but the FROM part ... */
ahwrite(te->copyStmt, 1, pos1, AH);
/* ... and insert modified FROM */
ahprintf(AH, " FROM '$$PATH$$/%s';\n\n", tctx->filename);
}
else
{
/* --inserts mode, no worries, just include the data file */
ahprintf(AH, "\\i $$PATH$$/%s\n\n", tctx->filename);
}
return;
}
if (strcmp(te->desc, "BLOBS") == 0)
_LoadLOs(AH);
else
_PrintFileData(AH, tctx->filename);
}
static void
_LoadLOs(ArchiveHandle *AH)
{
Oid oid;
lclContext *ctx = (lclContext *) AH->formatData;
TAR_MEMBER *th;
size_t cnt;
bool foundLO = false;
char buf[4096];
StartRestoreLOs(AH);
th = tarOpen(AH, NULL, 'r'); /* Open next file */
while (th != NULL)
{
ctx->FH = th;
if (strncmp(th->targetFile, "blob_", 5) == 0)
{
oid = atooid(&th->targetFile[5]);
if (oid != 0)
{
pg_log_info("restoring large object with OID %u", oid);
StartRestoreLO(AH, oid, AH->public.ropt->dropSchema);
while ((cnt = tarRead(buf, 4095, th)) > 0)
{
buf[cnt] = '\0';
ahwrite(buf, 1, cnt, AH);
}
EndRestoreLO(AH, oid);
foundLO = true;
}
tarClose(AH, th);
}
else
{
tarClose(AH, th);
/*
* Once we have found the first LO, stop at the first non-LO entry
* (which will be 'blobs.toc'). This coding would eat all the
* rest of the archive if there are no LOs ... but this function
* shouldn't be called at all in that case.
*/
if (foundLO)
break;
}
th = tarOpen(AH, NULL, 'r');
}
EndRestoreLOs(AH);
}
static int
_WriteByte(ArchiveHandle *AH, const int i)
{
lclContext *ctx = (lclContext *) AH->formatData;
char b = i; /* Avoid endian problems */
if (tarWrite(&b, 1, ctx->FH) != 1)
WRITE_ERROR_EXIT;
ctx->filePos += 1;
return 1;
}
static int
_ReadByte(ArchiveHandle *AH)
{
lclContext *ctx = (lclContext *) AH->formatData;
size_t res;
unsigned char c;
res = tarRead(&c, 1, ctx->FH);
if (res != 1)
/* We already would have exited for errors on reads, must be EOF */
pg_fatal("could not read from input file: end of file");
ctx->filePos += 1;
return c;
}
static void
_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
{
lclContext *ctx = (lclContext *) AH->formatData;
if (tarWrite(buf, len, ctx->FH) != len)
WRITE_ERROR_EXIT;
ctx->filePos += len;
}
static void
_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
{
lclContext *ctx = (lclContext *) AH->formatData;
if (tarRead(buf, len, ctx->FH) != len)
/* We already would have exited for errors on reads, must be EOF */
pg_fatal("could not read from input file: end of file");
ctx->filePos += len;
}
static void
_CloseArchive(ArchiveHandle *AH)
{
lclContext *ctx = (lclContext *) AH->formatData;
TAR_MEMBER *th;
RestoreOptions *ropt;
RestoreOptions *savRopt;
DumpOptions *savDopt;
int savVerbose,
i;
if (AH->mode == archModeWrite)
{
/*
* Write the Header & TOC to the archive FIRST
*/
th = tarOpen(AH, "toc.dat", 'w');
ctx->FH = th;
WriteHead(AH);
WriteToc(AH);
tarClose(AH, th); /* Not needed any more */
/*
* Now send the data (tables & LOs)
*/
WriteDataChunks(AH, NULL);
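        /* (second argument NULL: the tar format never dumps in parallel) */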
/*
* Now this format wants to append a script which does a full restore
* if the files have been extracted.
*/
th = tarOpen(AH, "restore.sql", 'w');
tarPrintf(th, "--\n"
"-- NOTE:\n"
"--\n"
"-- File paths need to be edited. Search for $$PATH$$ and\n"
"-- replace it with the path to the directory containing\n"
"-- the extracted data files.\n"
"--\n");
AH->CustomOutPtr = _scriptOut;
ctx->isSpecialScript = 1;
ctx->scriptTH = th;
ropt = NewRestoreOptions();
memcpy(ropt, AH->public.ropt, sizeof(RestoreOptions));
ropt->filename = NULL;
ropt->dropSchema = 1;
ropt->superuser = NULL;
ropt->suppressDumpWarnings = true;
savDopt = AH->public.dopt;
savRopt = AH->public.ropt;
SetArchiveOptions((Archive *) AH, NULL, ropt);
savVerbose = AH->public.verbose;
AH->public.verbose = 0;
RestoreArchive((Archive *) AH);
SetArchiveOptions((Archive *) AH, savDopt, savRopt);
AH->public.verbose = savVerbose;
tarClose(AH, th);
ctx->isSpecialScript = 0;
/*
* EOF marker for tar files is two blocks of NULLs.
*/
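        /* TAR_BLOCK_SIZE is 512 bytes, so this writes 1024 zero bytes. */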
for (i = 0; i < TAR_BLOCK_SIZE * 2; i++)
{
if (fputc(0, ctx->tarFH) == EOF)
WRITE_ERROR_EXIT;
}
/* Sync the output file if one is defined */
if (AH->dosync && AH->fSpec)
(void) fsync_fname(AH->fSpec, false);
}
AH->FH = NULL;
}
static size_t
_scriptOut(ArchiveHandle *AH, const void *buf, size_t len)
{
lclContext *ctx = (lclContext *) AH->formatData;
return tarWrite(buf, len, ctx->scriptTH);
}
/*
* Large Object support
*/
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
* to read the LOs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
* Optional, but strongly recommended.
*
*/
static void
_StartLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
char fname[K_STD_BUF_SIZE];
sprintf(fname, "blobs.toc");
ctx->loToc = tarOpen(AH, fname, 'w');
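
    /* Each LO adds a line like "16384 blob_16384.dat" here; see _StartLO. */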
}
/*
* Called by the archiver when the dumper calls StartLO.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void
_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclContext *ctx = (lclContext *) AH->formatData;
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
char fname[255];
if (oid == 0)
pg_fatal("invalid OID for large object (%u)", oid);
if (AH->compression_spec.algorithm != PG_COMPRESSION_NONE)
pg_fatal("compression is not supported by tar archive format");
sprintf(fname, "blob_%u.dat", oid);
tarPrintf(ctx->loToc, "%u %s\n", oid, fname);
tctx->TH = tarOpen(AH, fname, 'w');
}
/*
* Called by the archiver when the dumper calls EndLO.
*
* Optional.
*
*/
static void
_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
lclTocEntry *tctx = (lclTocEntry *) te->formatData;
tarClose(AH, tctx->TH);
}
/*
* Called by the archiver when finishing saving all BLOB DATA.
*
* Optional.
*
*/
static void
_EndLOs(ArchiveHandle *AH, TocEntry *te)
{
lclContext *ctx = (lclContext *) AH->formatData;
/* Write out a fake zero OID to mark end-of-LOs. */
/* WriteInt(AH, 0); */
tarClose(AH, ctx->loToc);
}
/*------------
* TAR Support
*------------
*/
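
/*
 * For reference, the ustar header fields this file reads or writes within
 * each 512-byte header block (offsets from the start of the block):
 *
 *	name	@   0	(100 bytes)	member file name (see _tarGetHeader)
 *	size	@ 124	( 12 bytes)	octal, or base-256 for very large members
 *	chksum	@ 148	(  8 bytes)	simple byte sum of the header
 *	magic	@ 257	(  8 bytes)	"ustar" magic/version (see isValidTarHeader)
 */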
static int
tarPrintf(TAR_MEMBER *th, const char *fmt,...)
{
int save_errno = errno;
char *p;
size_t len = 128; /* initial assumption about buffer size */
size_t cnt;
for (;;)
{
va_list args;
/* Allocate work buffer. */
p = (char *) pg_malloc(len);
/* Try to format the data. */
errno = save_errno;
va_start(args, fmt);
cnt = pvsnprintf(p, len, fmt, args);
va_end(args);
if (cnt < len)
break; /* success */
/* Release buffer and loop around to try again with larger len. */
free(p);
len = cnt;
}
cnt = tarWrite(p, cnt, th);
free(p);
return (int) cnt;
}
bool
isValidTarHeader(char *header)
{
int sum;
int chk = tarChecksum(header);
sum = read_tar_number(&header[148], 8);
if (sum != chk)
return false;
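    /* Identify the dialect from the magic/version bytes at offset 257 */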
/* POSIX tar format */
if (memcmp(&header[257], "ustar\0", 6) == 0 &&
memcmp(&header[263], "00", 2) == 0)
return true;
/* GNU tar format */
if (memcmp(&header[257], "ustar \0", 8) == 0)
return true;
/* not-quite-POSIX format written by pre-9.3 pg_dump */
if (memcmp(&header[257], "ustar00\0", 8) == 0)
return true;
return false;
}
/* Given the member, write the TAR header & copy the file */
static void
_tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
{
lclContext *ctx = (lclContext *) AH->formatData;
FILE *tmp = th->tmpFH; /* Grab it for convenience */
char buf[32768];
size_t cnt;
pgoff_t len = 0;
size_t res;
size_t i,
pad;
/*
* Find file len & go back to start.
*/
if (fseeko(tmp, 0, SEEK_END) != 0)
pg_fatal("error during file seek: %m");
th->fileLen = ftello(tmp);
if (th->fileLen < 0)
pg_fatal("could not determine seek position in archive file: %m");
if (fseeko(tmp, 0, SEEK_SET) != 0)
pg_fatal("error during file seek: %m");
_tarWriteHeader(th);
while ((cnt = fread(buf, 1, sizeof(buf), tmp)) > 0)
{
if ((res = fwrite(buf, 1, cnt, th->tarFH)) != cnt)
WRITE_ERROR_EXIT;
len += res;
}
if (!feof(tmp))
READ_ERROR_EXIT(tmp);
if (fclose(tmp) != 0) /* This *should* delete it... */
pg_fatal("could not close temporary file: %m");
if (len != th->fileLen)
pg_fatal("actual file length (%lld) does not match expected (%lld)",
(long long) len, (long long) th->fileLen);
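
    /* Pad the member data with NULs to the next 512-byte block boundary */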
pad = tarPaddingBytesRequired(len);
for (i = 0; i < pad; i++)
{
if (fputc('\0', th->tarFH) == EOF)
WRITE_ERROR_EXIT;
}
ctx->tarFHpos += len + pad;
}
/* Locate the file in the archive, read header and position to data */
static TAR_MEMBER *
_tarPositionTo(ArchiveHandle *AH, const char *filename)
{
lclContext *ctx = (lclContext *) AH->formatData;
TAR_MEMBER *th = pg_malloc0_object(TAR_MEMBER);
char c;
char header[TAR_BLOCK_SIZE];
size_t i,
len,
blks;
int id;
th->AH = AH;
/* Go to end of current file, if any */
if (ctx->tarFHpos != 0)
{
pg_log_debug("moving from position %lld to next member at file position %lld",
(long long) ctx->tarFHpos, (long long) ctx->tarNextMember);
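        /* Skip by reading: the input may be a stream with no seek support */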
while (ctx->tarFHpos < ctx->tarNextMember)
_tarReadRaw(AH, &c, 1, NULL, ctx->tarFH);
}
pg_log_debug("now at file position %lld", (long long) ctx->tarFHpos);
/* We are at the start of the file, or at the next member */
/* Get the header */
if (!_tarGetHeader(AH, th))
{
if (filename)
pg_fatal("could not find header for file \"%s\" in tar archive", filename);
else
{
/*
* We're just scanning the archive for the next file, so return
* null
*/
free(th);
return NULL;
}
}
while (filename != NULL && strcmp(th->targetFile, filename) != 0)
{
pg_log_debug("skipping tar member %s", th->targetFile);
id = atoi(th->targetFile);
if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
pg_fatal("restoring data out of order is not supported in this archive format: "
"\"%s\" is required, but comes before \"%s\" in the archive file.",
th->targetFile, filename);
/* Header doesn't match, so read to next header */
len = th->fileLen;
len += tarPaddingBytesRequired(th->fileLen);
blks = len / TAR_BLOCK_SIZE; /* # of tar blocks */
for (i = 0; i < blks; i++)
_tarReadRaw(AH, &header[0], TAR_BLOCK_SIZE, NULL, ctx->tarFH);
if (!_tarGetHeader(AH, th))
pg_fatal("could not find header for file \"%s\" in tar archive", filename);
}
ctx->tarNextMember = ctx->tarFHpos + th->fileLen
+ tarPaddingBytesRequired(th->fileLen);
th->pos = 0;
return th;
}
/* Read & verify a header */
static int
_tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
{
lclContext *ctx = (lclContext *) AH->formatData;
char h[TAR_BLOCK_SIZE];
char tag[100 + 1];
int sum,
chk;
pgoff_t len;
pgoff_t hPos;
bool gotBlock = false;
while (!gotBlock)
{
/* Save the pos for reporting purposes */
hPos = ctx->tarFHpos;
/* Read the next tar block, return EOF, exit if short */
len = _tarReadRaw(AH, h, TAR_BLOCK_SIZE, NULL, ctx->tarFH);
if (len == 0) /* EOF */
return 0;
if (len != TAR_BLOCK_SIZE)
pg_fatal(ngettext("incomplete tar header found (%lu byte)",
"incomplete tar header found (%lu bytes)",
len),
(unsigned long) len);
/* Calc checksum */
chk = tarChecksum(h);
sum = read_tar_number(&h[148], 8);
/*
* If the checksum failed, see if it is a null block. If so, silently
* continue to the next block.
*/
if (chk == sum)
gotBlock = true;
else
{
int i;
for (i = 0; i < TAR_BLOCK_SIZE; i++)
{
if (h[i] != 0)
{
gotBlock = true;
break;
}
}
}
}
/* Name field is 100 bytes, might not be null-terminated */
strlcpy(tag, &h[0], 100 + 1);
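
    /* Size field is octal, or base-256 (GNU convention) for files >= 8 GB */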
len = read_tar_number(&h[124], 12);
pg_log_debug("TOC Entry %s at %llu (length %llu, checksum %d)",
tag, (unsigned long long) hPos, (unsigned long long) len, sum);
if (chk != sum)
pg_fatal("corrupt tar header found in %s (expected %d, computed %d) file position %llu",
tag, sum, chk, (unsigned long long) ftello(ctx->tarFH));
th->targetFile = pg_strdup(tag);
th->fileLen = len;
return 1;
}
static void
_tarWriteHeader(TAR_MEMBER *th)
{
char h[TAR_BLOCK_SIZE];
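
    /*
     * Member files get mode 0600 and dummy uid/gid values (04000/02000);
     * the mtime is simply the time of the dump.
     */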
tarCreateHeader(h, th->targetFile, NULL, th->fileLen,
0600, 04000, 02000, time(NULL));
/* Now write the completed header. */
if (fwrite(h, 1, TAR_BLOCK_SIZE, th->tarFH) != TAR_BLOCK_SIZE)
WRITE_ERROR_EXIT;
}