
/*-------------------------------------------------------------------------
*
* bbstreamer_gzip.c
*
* Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/bin/pg_basebackup/bbstreamer_gzip.c
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <unistd.h>
#ifdef HAVE_LIBZ
#include <zlib.h>
#endif
#include "bbstreamer.h"
#include "common/file_perm.h"
#include "common/logging.h"
#include "common/string.h"
#ifdef HAVE_LIBZ
typedef struct bbstreamer_gzip_writer
{
bbstreamer base;
char *pathname;		/* file name, used for error reporting */
gzFile gzfile;		/* compressed output file */
} bbstreamer_gzip_writer;
typedef struct bbstreamer_gzip_decompressor
{
bbstreamer base;
z_stream zstream;	/* zlib inflate stream state */
size_t bytes_written;	/* decompressed bytes pending in base.bbs_buffer */
} bbstreamer_gzip_decompressor;
static void bbstreamer_gzip_writer_content(bbstreamer *streamer,
bbstreamer_member *member,
const char *data, int len,
bbstreamer_archive_context context);
static void bbstreamer_gzip_writer_finalize(bbstreamer *streamer);
static void bbstreamer_gzip_writer_free(bbstreamer *streamer);
static const char *get_gz_error(gzFile gzf);
const bbstreamer_ops bbstreamer_gzip_writer_ops = {
.content = bbstreamer_gzip_writer_content,
.finalize = bbstreamer_gzip_writer_finalize,
.free = bbstreamer_gzip_writer_free
};
static void bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
bbstreamer_member *member,
const char *data, int len,
bbstreamer_archive_context context);
static void bbstreamer_gzip_decompressor_finalize(bbstreamer *streamer);
static void bbstreamer_gzip_decompressor_free(bbstreamer *streamer);
static void *gzip_palloc(void *opaque, unsigned items, unsigned size);
static void gzip_pfree(void *opaque, void *address);
const bbstreamer_ops bbstreamer_gzip_decompressor_ops = {
.content = bbstreamer_gzip_decompressor_content,
.finalize = bbstreamer_gzip_decompressor_finalize,
.free = bbstreamer_gzip_decompressor_free
};
#endif
/*
* Create a bbstreamer that just compresses data using gzip, and then writes
* it to a file.
*
* As in the case of bbstreamer_plain_writer_new, pathname is always used
* for error reporting purposes; if file is NULL, it is also opened and
* closed so that the data may be written there.
*/
bbstreamer *
bbstreamer_gzip_writer_new(char *pathname, FILE *file,
pg_compress_specification *compress)
{
#ifdef HAVE_LIBZ
bbstreamer_gzip_writer *streamer;
streamer = palloc0(sizeof(bbstreamer_gzip_writer));
*((const bbstreamer_ops **) &streamer->base.bbs_ops) =
&bbstreamer_gzip_writer_ops;
streamer->pathname = pstrdup(pathname);
if (file == NULL)
{
streamer->gzfile = gzopen(pathname, "wb");
if (streamer->gzfile == NULL)
pg_fatal("could not create compressed file \"%s\": %m",
pathname);
}
else
{
int fd = dup(fileno(file));
if (fd < 0)
pg_fatal("could not duplicate stdout: %m");
streamer->gzfile = gzdopen(fd, "wb");
if (streamer->gzfile == NULL)
pg_fatal("could not open output file: %m");
}
if (gzsetparams(streamer->gzfile, compress->level, Z_DEFAULT_STRATEGY) != Z_OK)
pg_fatal("could not set compression level %d: %s",
compress->level, get_gz_error(streamer->gzfile));
return &streamer->base;
#else
pg_fatal("this build does not support compression with %s", "gzip");
return NULL; /* keep compiler quiet */
#endif
}
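/*
* A minimal usage sketch, assuming a caller shaped like pg_basebackup's
* CreateBackupStreamer and an already-filled pg_compress_specification;
* the identifiers below are illustrative, not taken from this file:
*
*    streamer = bbstreamer_gzip_writer_new("base.tar.gz", NULL, &compress);
*    ... push archive chunks through bbstreamer_content(streamer, ...) ...
*    bbstreamer_finalize(streamer);
*    bbstreamer_free(streamer);
*/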
#ifdef HAVE_LIBZ
/*
* Write archive content to gzip file.
*/
static void
bbstreamer_gzip_writer_content(bbstreamer *streamer,
bbstreamer_member *member, const char *data,
int len, bbstreamer_archive_context context)
{
bbstreamer_gzip_writer *mystreamer;
mystreamer = (bbstreamer_gzip_writer *) streamer;
if (len == 0)
return;
errno = 0;
if (gzwrite(mystreamer->gzfile, data, len) != len)
{
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
pg_fatal("could not write to compressed file \"%s\": %s",
mystreamer->pathname, get_gz_error(mystreamer->gzfile));
}
}
/*
* End-of-archive processing when writing to a gzip file consists of just
* calling gzclose.
*
* It makes no difference whether we opened the file or the caller did it,
* because libz provides no way of avoiding a close on the underlying file
* handle. Notice, however, that bbstreamer_gzip_writer_new() uses dup() to
* work around this issue, so that the behavior from the caller's viewpoint
* is the same as for bbstreamer_plain_writer.
*/
static void
bbstreamer_gzip_writer_finalize(bbstreamer *streamer)
{
bbstreamer_gzip_writer *mystreamer;
mystreamer = (bbstreamer_gzip_writer *) streamer;
errno = 0; /* in case gzclose() doesn't set it */
if (gzclose(mystreamer->gzfile) != 0)
pg_fatal("could not close compressed file \"%s\": %m",
mystreamer->pathname);
mystreamer->gzfile = NULL;
}
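/*
* Note that gzclose() also flushes any buffered compressed data and writes
* the gzip trailer before closing the underlying descriptor, so no separate
* flush call is needed in the finalize step above.
*/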
/*
* Free memory associated with this bbstreamer.
*/
static void
bbstreamer_gzip_writer_free(bbstreamer *streamer)
{
bbstreamer_gzip_writer *mystreamer;
mystreamer = (bbstreamer_gzip_writer *) streamer;
Assert(mystreamer->base.bbs_next == NULL);	/* writer must be the last step in the chain */
Assert(mystreamer->gzfile == NULL);	/* finalize should already have closed the file */
pfree(mystreamer->pathname);
pfree(mystreamer);
}
/*
* Helper function for libz error reporting.
*/
static const char *
get_gz_error(gzFile gzf)
{
int errnum;
const char *errmsg;
errmsg = gzerror(gzf, &errnum);
if (errnum == Z_ERRNO)
return strerror(errno);
else
return errmsg;
}
#endif
/*
* Create a new base backup streamer that performs decompression of gzip
* compressed blocks.
*/
bbstreamer *
bbstreamer_gzip_decompressor_new(bbstreamer *next)
{
#ifdef HAVE_LIBZ
bbstreamer_gzip_decompressor *streamer;
z_stream *zs;
Assert(next != NULL);
streamer = palloc0(sizeof(bbstreamer_gzip_decompressor));
*((const bbstreamer_ops **) &streamer->base.bbs_ops) =
&bbstreamer_gzip_decompressor_ops;
streamer->base.bbs_next = next;
initStringInfo(&streamer->base.bbs_buffer);
/* Initialize internal stream state for decompression */
zs = &streamer->zstream;
zs->zalloc = gzip_palloc;
zs->zfree = gzip_pfree;
zs->next_out = (uint8 *) streamer->base.bbs_buffer.data;
zs->avail_out = streamer->base.bbs_buffer.maxlen;
/*
* Data compression was initialized using deflateInit2 to request a gzip
* header. Similarly, we are using inflateInit2 to initialize data
* decompression.
*
* Per the documentation for inflateInit2, the second argument is
* "windowBits" and its value must be greater than or equal to the value
* provided while compressing the data, so we are using the maximum
* possible value for safety.
*/
if (inflateInit2(zs, 15 + 16) != Z_OK)
pg_fatal("could not initialize compression library");
return &streamer->base;
#else
pg_fatal("this build does not support compression with %s", "gzip");
return NULL; /* keep compiler quiet */
#endif
}
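/*
* For reference, the matching compression side is assumed to have been set
* up with a gzip wrapper as well; a sketch of such an initialization (the
* server-side code lives elsewhere, so treat this as illustrative only):
*
*    if (deflateInit2(&zs, compresslevel, Z_DEFLATED, 15 + 16, 8,
*                     Z_DEFAULT_STRATEGY) != Z_OK)
*        pg_fatal("could not initialize compression library");
*
* Here windowBits = 15 + 16 requests a gzip header, which is what the
* inflateInit2(zs, 15 + 16) call above is prepared to consume.
*/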
#ifdef HAVE_LIBZ
/*
* Decompress the input data to output buffer until we run out of input
* data. Each time the output buffer is full, pass on the decompressed data
* to the next streamer.
*/
static void
bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
bbstreamer_member *member,
const char *data, int len,
bbstreamer_archive_context context)
{
bbstreamer_gzip_decompressor *mystreamer;
z_stream *zs;
mystreamer = (bbstreamer_gzip_decompressor *) streamer;
zs = &mystreamer->zstream;
zs->next_in = (const uint8 *) data;
zs->avail_in = len;
/* Process the current chunk */
while (zs->avail_in > 0)
{
int res;
Assert(mystreamer->bytes_written < mystreamer->base.bbs_buffer.maxlen);
zs->next_out = (uint8 *)
mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
zs->avail_out =
mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
/*
* This call decompresses data starting at zs->next_in and updates
* zs->next_in and zs->avail_in. It generates output data starting
* at zs->next_out and updates zs->next_out and zs->avail_out
* accordingly.
*/
res = inflate(zs, Z_NO_FLUSH);
if (res == Z_STREAM_ERROR)
pg_log_error("could not decompress data: %s", zs->msg);
mystreamer->bytes_written =
mystreamer->base.bbs_buffer.maxlen - zs->avail_out;
/* If output buffer is full then pass data to next streamer */
if (mystreamer->bytes_written >= mystreamer->base.bbs_buffer.maxlen)
{
bbstreamer_content(mystreamer->base.bbs_next, member,
mystreamer->base.bbs_buffer.data,
mystreamer->base.bbs_buffer.maxlen, context);
mystreamer->bytes_written = 0;
}
}
}
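/*
* Any decompressed data that does not fill the buffer completely stays
* behind: bytes_written records how much is pending, and that data is
* forwarded either on a later call once the buffer fills up or by the
* finalize callback below.
*/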
/*
* End-of-stream processing.
*/
static void
bbstreamer_gzip_decompressor_finalize(bbstreamer *streamer)
{
bbstreamer_gzip_decompressor *mystreamer;
mystreamer = (bbstreamer_gzip_decompressor *) streamer;
/*
* End of the stream: if there is any pending data in the output buffer,
* forward it to the next streamer.
*/
bbstreamer_content(mystreamer->base.bbs_next, NULL,
mystreamer->base.bbs_buffer.data,
mystreamer->base.bbs_buffer.maxlen,
BBSTREAMER_UNKNOWN);
bbstreamer_finalize(mystreamer->base.bbs_next);
}
/*
* Free memory.
*/
static void
bbstreamer_gzip_decompressor_free(bbstreamer *streamer)
{
bbstreamer_free(streamer->bbs_next);
pfree(streamer->bbs_buffer.data);
pfree(streamer);
}
/*
* Wrapper function to adjust the signature of palloc to match what libz
* expects.
*/
static void *
gzip_palloc(void *opaque, unsigned items, unsigned size)
{
return palloc(items * size);
}
/*
* Wrapper function to adjust the signature of pfree to match what libz
* expects.
*/
static void
gzip_pfree(void *opaque, void *address)
{
pfree(address);
}
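/*
* Both wrappers match zlib's alloc_func and free_func callback types from
* zlib.h. The opaque pointer is unused: in this frontend code, palloc and
* pfree need no per-stream state.
*/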
#endif