2021-05-07 16:56:14 +02:00
2024-01-04 02:49:05 +01:00
# Copyright (c) 2021-2024, PostgreSQL Global Development Group
2021-05-07 16:56:14 +02:00
2014-04-15 03:33:46 +02:00
use strict ;
2023-12-29 18:01:53 +01:00
use warnings FATAL => 'all';
2024-02-13 13:47:12 +01:00
use Config ;
2018-03-27 15:14:40 +02:00
use File::Basename qw( basename dirname ) ;
2018-04-06 22:26:31 +02:00
use File::Path qw( rmtree ) ;
2021-10-24 16:28:19 +02:00
use PostgreSQL::Test::Cluster ;
use PostgreSQL::Test::Utils ;
2022-02-02 19:50:33 +01:00
use Test::More ;
2014-04-15 03:33:46 +02:00
# Standard sanity checks: --help, --version, and bad-option handling.
program_help_ok('pg_basebackup');
program_version_ok('pg_basebackup');
program_options_handling_ok('pg_basebackup');
2021-10-24 16:28:19 +02:00
# Short-lived scratch directory, removed automatically at test exit.
my $tempdir = PostgreSQL::Test::Utils::tempdir;
2014-04-15 03:33:46 +02:00
2021-10-24 16:28:19 +02:00
# The cluster under test; initialized and started further below.
my $node = PostgreSQL::Test::Cluster->new('main');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
2022-01-18 00:40:00 +01:00
# For nearly all pg_basebackup invocations some options should be specified,
# to keep test times reasonable.  Using @pg_basebackup_defs as the first
# element of the array passed to IPC::Run interpolates the array (as it is
# not a reference to an array)...
my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
2018-04-07 23:45:39 +02:00
# Set umask so test directories and files are created with default permissions
umask(0077);
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Initialize node without replication settings; enable data checksums and
# pre-create the role used later for backup-permission tests.
$node->init(
	extra => ['--data-checksums'],
	auth_extra => [ '--create-role', 'backupuser' ]);
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
$node->start;
my $pgdata = $node->data_dir;

# With no target directory given, pg_basebackup must refuse to run.
$node->command_fails(['pg_basebackup'],
	'pg_basebackup needs target directory specified');
2022-01-21 03:08:43 +01:00
# Sanity checks for options
$node->command_fails_like(
	[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none:1' ],
	qr/\Qcompression algorithm "none" does not accept a compression level/,
	'failure if method "none" specified with compression level');
$node->command_fails_like(
	[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none+' ],
	qr/\Qunrecognized compression algorithm: "none+"/,
	'failure on incorrect separator to define compression level');
2015-06-22 02:04:36 +02:00
# Some Windows ANSI code pages may reject this filename, in which case we
# quietly proceed without this bit of test coverage.
if (open my $badchars, '>>', "$tempdir/pgdata/FOO\xe0\xe0\xe0BAR")
{
	print $badchars "test backup of file with non-UTF8 name\n";
	close $badchars;
}
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Adjust the node's replication configuration via the Cluster helper.
$node->set_replication_conf();
Harden TAP tests that intentionally corrupt page checksums.
The previous method for doing that was to write zeroes into a
predetermined set of page locations. However, there's a roughly
1-in-64K chance that the existing checksum will match by chance,
and yesterday several buildfarm animals started to reproducibly
see that, resulting in test failures because no checksum mismatch
was reported.
Since the checksum includes the page LSN, test success depends on
the length of the installation's WAL history, which is affected by
(at least) the initial catalog contents, the set of locales installed
on the system, and the length of the pathname of the test directory.
Sooner or later we were going to hit a chance match, and today is
that day.
Harden these tests by specifically inverting the checksum field and
leaving all else alone, thereby guaranteeing that the checksum is
incorrect.
In passing, fix places that were using seek() to set up for syswrite(),
a combination that the Perl docs very explicitly warn against. We've
probably escaped problems because no regular buffered I/O is done on
these filehandles; but if it ever breaks, we wouldn't deserve or get
much sympathy.
Although we've only seen problems in HEAD, now that we recognize the
environmental dependencies it seems like it might be just a matter
of time until someone manages to hit this in back-branch testing.
Hence, back-patch to v11 where we started doing this kind of test.
Discussion: https://postgr.es/m/3192026.1648185780@sss.pgh.pa.us
2022-03-25 19:23:26 +01:00
# Reload so the configuration change above takes effect.
$node->reload;
2014-04-15 03:33:46 +02:00
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Backup must fail at this point (per the test name, due to the WAL
# configuration), and the partially-created target directory must be
# removed again.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup" ],
	'pg_basebackup fails because of WAL configuration');
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
2019-06-03 06:44:03 +02:00
# Create a backup directory that is not empty so the next command will fail
# but leave the data directory behind
mkdir("$tempdir/backup")
  or BAIL_OUT("unable to create $tempdir/backup");
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");

# With -n (no-clean), the failed run must leave the directory in place.
$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-n' ],
	'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
rmtree("$tempdir/backup");
2016-09-12 18:00:00 +02:00
Activate perlcritic InputOutput::RequireCheckedSyscalls and fix resulting warnings
This checks that certain I/O-related Perl functions properly check
their return value. Some parts of the PostgreSQL code had been a bit
sloppy about that. The new perlcritic warnings are fixed here. I
didn't design any beautiful error messages, mostly just used "or die
$!", which mostly matches existing code, and also this is
developer-level code, so having the system error plus source code
reference should be ok.
Initially, we only activate this check for a subset of what the
perlcritic check would warn about. The effective list is
chmod flock open read rename seek symlink system
The initial set of functions is picked because most existing code
already checked the return value of those, so any omissions are
probably unintended, or because it seems important for test
correctness.
The actual perlcritic configuration is written as an exclude list.
That seems better so that we are clear on what we are currently not
checking. Maybe future patches want to investigate checking some of
the other functions. (In principle, we might eventually want to check
all of them, but since this is test and build support code, not
production code, there are probably some reasonable compromises to be
made.)
Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
Discussion: https://www.postgresql.org/message-id/flat/88b7d4f2-46d9-4cc7-b1f7-613c90f9a76a%40eisentraut.org
2024-03-19 07:01:22 +01:00
# Append the settings needed for replication connections to the node's
# configuration.  Check close() as well: write errors on a buffered
# handle only surface at close time.
open my $conf, '>>', "$pgdata/postgresql.conf" or die $!;
print $conf "max_replication_slots = 10\n";
print $conf "max_wal_senders = 10\n";
print $conf "wal_level = replica\n";
close $conf or die $!;
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Restart the server so the configuration appended above takes effect.
$node->restart;
2014-04-15 03:33:46 +02:00
2022-03-23 14:19:14 +01:00
# Now that we have a server that supports replication commands, test whether
# certain invalid compression commands fail on the client side with client-side
# compression and on the server side with server-side compression.
Simplify handling of compression level with compression specifications
PG_COMPRESSION_OPTION_LEVEL is removed from the compression
specification logic, and instead the compression level is always
assigned with each library's default if nothing is directly given. This
centralizes the checks on the compression methods supported by a given
build, and always assigns a default compression level when parsing a
compression specification. This results in complaining at an earlier
stage than previously if a build supports a compression method or not,
aka when parsing a specification in the backend or the frontend, and not
when processing it. zstd, lz4 and zlib are able to handle in their
respective routines setting up the compression level the case of a
default value, hence the backend or frontend code (pg_receivewal or
pg_basebackup) has now no need to know what the default compression
level should be if nothing is specified: the logic is now done so as the
specification parsing assigns it. It can also be enforced by passing
down a "level" set to the default value, that the backend will accept
(the replication protocol is for example able to handle a command like
BASE_BACKUP (COMPRESSION_DETAIL 'gzip:level=-1')).
This code simplification fixes an issue with pg_basebackup --gzip
introduced by ffd5365, where the tarball of the streamed WAL segments
would be created as of pg_wal.tar.gz with uncompressed contents, while
the intention is to compress the segments with gzip at a default level.
The origin of the confusion comes from the handling of the default
compression level of gzip (-1 or Z_DEFAULT_COMPRESSION) and the value of
0 was getting assigned, which is what walmethods.c would consider
as equivalent to no compression when streaming WAL segments with its tar
methods. Assigning always the compression level removes the confusion
of some code paths considering a value of 0 set in a specification as
either no compression or a default compression level.
Note that 010_pg_basebackup.pl has to be adjusted to skip a few tests
where the shape of the compression detail string for client and
server-side compression was checked using gzip. This is a result of the
code simplification, as gzip specifications cannot be used if a build
does not support it.
Reported-by: Tom Lane
Reviewed-by: Tom Lane
Discussion: https://postgr.es/m/1400032.1662217889@sss.pgh.pa.us
Backpatch-through: 15
2022-09-14 05:16:57 +02:00
SKIP:
2022-03-23 14:19:14 +01:00
{
Simplify handling of compression level with compression specifications
PG_COMPRESSION_OPTION_LEVEL is removed from the compression
specification logic, and instead the compression level is always
assigned with each library's default if nothing is directly given. This
centralizes the checks on the compression methods supported by a given
build, and always assigns a default compression level when parsing a
compression specification. This results in complaining at an earlier
stage than previously if a build supports a compression method or not,
aka when parsing a specification in the backend or the frontend, and not
when processing it. zstd, lz4 and zlib are able to handle in their
respective routines setting up the compression level the case of a
default value, hence the backend or frontend code (pg_receivewal or
pg_basebackup) has now no need to know what the default compression
level should be if nothing is specified: the logic is now done so as the
specification parsing assigns it. It can also be enforced by passing
down a "level" set to the default value, that the backend will accept
(the replication protocol is for example able to handle a command like
BASE_BACKUP (COMPRESSION_DETAIL 'gzip:level=-1')).
This code simplification fixes an issue with pg_basebackup --gzip
introduced by ffd5365, where the tarball of the streamed WAL segments
would be created as of pg_wal.tar.gz with uncompressed contents, while
the intention is to compress the segments with gzip at a default level.
The origin of the confusion comes from the handling of the default
compression level of gzip (-1 or Z_DEFAULT_COMPRESSION) and the value of
0 was getting assigned, which is what walmethods.c would consider
as equivalent to no compression when streaming WAL segments with its tar
methods. Assigning always the compression level removes the confusion
of some code paths considering a value of 0 set in a specification as
either no compression or a default compression level.
Note that 010_pg_basebackup.pl has to be adjusted to skip a few tests
where the shape of the compression detail string for client and
server-side compression was checked using gzip. This is a result of the
code simplification, as gzip specifications cannot be used if a build
does not support it.
Reported-by: Tom Lane
Reviewed-by: Tom Lane
Discussion: https://postgr.es/m/1400032.1662217889@sss.pgh.pa.us
Backpatch-through: 15
2022-09-14 05:16:57 +02:00
skip "postgres was not built with ZLIB support", 6
  if (!check_pg_config("#define HAVE_LIBZ 1"));

# Error-message prefixes for client-side vs. server-side compression
# failures; the loop below matches each expected message against both.
my $client_fails = 'pg_basebackup: error: ';
my $server_fails =
  'pg_basebackup: error: could not initiate base backup: ERROR: ';

# Each entry: [ compression specification, expected error text, test name ].
my @compression_failure_tests = (
	[
		'extrasquishy',
		'unrecognized compression algorithm: "extrasquishy"',
		'failure on invalid compression algorithm'
	],
	[
		'gzip:',
		'invalid compression specification: found empty string where a compression option was expected',
		'failure on empty compression options list'
	],
	[
		'gzip:thunk',
		'invalid compression specification: unrecognized compression option: "thunk"',
		'failure on unknown compression option'
	],
	[
		'gzip:level',
		'invalid compression specification: compression option "level" requires a value',
		'failure on missing compression level'
	],
	[
		'gzip:level=',
		'invalid compression specification: value for compression option "level" must be an integer',
		'failure on empty compression level'
	],
	[
		'gzip:level=high',
		'invalid compression specification: value for compression option "level" must be an integer',
		'failure on non-numeric compression level'
	],
	[
		'gzip:level=236',
		'invalid compression specification: compression algorithm "gzip" expects a compression level between 1 and 9',
		'failure on out-of-range compression level'
	],
	[
		'gzip:level=9,',
		'invalid compression specification: found empty string where a compression option was expected',
		'failure on extra, empty compression option'
	],
	[
		'gzip:workers=3',
		'invalid compression specification: compression algorithm "gzip" does not accept a worker count',
		'failure on worker count for gzip'
	],
	[
		'gzip:long',
		'invalid compression specification: compression algorithm "gzip" does not support long-distance mode',
		'failure on long mode for gzip'
	],);
Simplify handling of compression level with compression specifications
PG_COMPRESSION_OPTION_LEVEL is removed from the compression
specification logic, and instead the compression level is always
assigned with each library's default if nothing is directly given. This
centralizes the checks on the compression methods supported by a given
build, and always assigns a default compression level when parsing a
compression specification. This results in complaining at an earlier
stage than previously if a build supports a compression method or not,
aka when parsing a specification in the backend or the frontend, and not
when processing it. zstd, lz4 and zlib are able to handle in their
respective routines setting up the compression level the case of a
default value, hence the backend or frontend code (pg_receivewal or
pg_basebackup) has now no need to know what the default compression
level should be if nothing is specified: the logic is now done so as the
specification parsing assigns it. It can also be enforced by passing
down a "level" set to the default value, that the backend will accept
(the replication protocol is for example able to handle a command like
BASE_BACKUP (COMPRESSION_DETAIL 'gzip:level=-1')).
This code simplification fixes an issue with pg_basebackup --gzip
introduced by ffd5365, where the tarball of the streamed WAL segments
would be created as of pg_wal.tar.gz with uncompressed contents, while
the intention is to compress the segments with gzip at a default level.
The origin of the confusion comes from the handling of the default
compression level of gzip (-1 or Z_DEFAULT_COMPRESSION) and the value of
0 was getting assigned, which is what walmethods.c would consider
as equivalent to no compression when streaming WAL segments with its tar
methods. Assigning always the compression level removes the confusion
of some code paths considering a value of 0 set in a specification as
either no compression or a default compression level.
Note that 010_pg_basebackup.pl has to be adjusted to skip a few tests
where the shape of the compression detail string for client and
server-side compression was checked using gzip. This is a result of the
code simplification, as gzip specifications cannot be used if a build
does not support it.
Reported-by: Tom Lane
Reviewed-by: Tom Lane
Discussion: https://postgr.es/m/1400032.1662217889@sss.pgh.pa.us
Backpatch-through: 15
2022-09-14 05:16:57 +02:00
# Each invalid compression specification must be rejected both for
# client-side compression (--compress METHOD) and for server-side
# compression (--compress server-METHOD).  The expected error message
# differs only in its prefix ($client_fails vs $server_fails).
for my $cft (@compression_failure_tests)
{
	# quotemeta: the expected message is matched as a regex and may
	# contain metacharacters such as '(' or '"'.
	my $cfail = quotemeta($client_fails . $cft->[1]);
	my $sfail = quotemeta($server_fails . $cft->[1]);
	$node->command_fails_like(
		[
			'pg_basebackup', '-D',
			"$tempdir/backup", '--compress',
			$cft->[0]
		],
		qr/$cfail/,
		'client ' . $cft->[2]);
	$node->command_fails_like(
		[
			'pg_basebackup', '-D',
			"$tempdir/backup", '--compress',
			'server-' . $cft->[0]
		],
		qr/$sfail/,
		'server ' . $cft->[2]);
}
2022-03-23 14:19:14 +01:00
}
2016-09-28 18:00:00 +02:00
# Write some files to test that they are not copied.
# NOTE(review): the qw() list had been garbled by inserted whitespace,
# which would have split single paths into several bogus filenames;
# restored to one entry per excluded file.
foreach my $filename (
	qw(backup_label tablespace_map postgresql.auto.conf.tmp
	current_logfiles.tmp global/pg_internal.init.123))
{
	open my $file, '>>', "$pgdata/$filename" or die $!;
	print $file "DONOTCOPY";
	close $file;
}
2024-02-13 13:47:12 +01:00
# Test that macOS system files are skipped.  Only test on non-macOS systems
# however since creating incorrect .DS_Store files on a macOS system may have
# unintended side effects.
if ($Config{osname} ne 'darwin')
{
	open my $file, '>>', "$pgdata/.DS_Store" or die $!;
	print $file "DONOTCOPY";
	close $file;
}
2017-11-07 18:28:35 +01:00
# Connect to a database to create global/pg_internal.init.  If this is removed
# the test to ensure global/pg_internal.init is not copied will return a false
# positive.
$node->safe_psql('postgres', 'SELECT 1;');

# Create an unlogged table to test that forks other than init are not copied.
$node->safe_psql('postgres', 'CREATE UNLOGGED TABLE base_unlogged (id int)');

my $baseUnloggedPath = $node->safe_psql('postgres',
	q{select pg_relation_filepath('base_unlogged')});

# Make sure main and init forks exist
ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
2018-03-27 15:14:40 +02:00
# Create files that look like temporary relations to ensure they are ignored.
my $postgresOid = $node->safe_psql('postgres',
	q{select oid from pg_database where datname = 'postgres'});

# Names follow the t<backendid>_<relfilenode>[_<forkname>][.<segment>]
# pattern used for temporary relation files.
my @tempRelationFiles =
  qw(t999_999 t9999_999.1 t999_9999_vm t99999_99999_vm.1);

foreach my $filename (@tempRelationFiles)
{
	append_to_file("$pgdata/base/$postgresOid/$filename", 'TEMP_RELATION');
}
# Run base backup.
$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
	'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
Generate backup manifests for base backups, and validate them.
A manifest is a JSON document which includes (1) the file name, size,
last modification time, and an optional checksum for each file backed
up, (2) timelines and LSNs for whatever WAL will need to be replayed
to make the backup consistent, and (3) a checksum for the manifest
itself. By default, we use CRC-32C when checksumming data files,
because we are trying to detect corruption and user error, not foil an
adversary. However, pg_basebackup and the server-side BASE_BACKUP
command now have options to select a different algorithm, so users
wanting a cryptographic hash function can select SHA-224, SHA-256,
SHA-384, or SHA-512. Users not wanting file checksums at all can
disable them, or disable generating of the backup manifest altogether.
Using a cryptographic hash function in place of CRC-32C consumes
significantly more CPU cycles, which may slow down backups in some
cases.
A new tool called pg_validatebackup can validate a backup against the
manifest. If no checksums are present, it can still check that the
right files exist and that they have the expected sizes. If checksums
are present, it can also verify that each file has the expected
checksum. Additionally, it calls pg_waldump to verify that the
expected WAL files are present and parseable. Only plain format
backups can be validated directly, but tar format backups can be
validated after extracting them.
Robert Haas, with help, ideas, review, and testing from David Steele,
Stephen Frost, Andrew Dunstan, Rushabh Lathia, Suraj Kharage, Tushar
Ahuja, Rajkumar Raghuwanshi, Mark Dilger, Davinder Singh, Jeevan
Chalke, Amit Kapila, Andres Freund, and Noah Misch.
Discussion: http://postgr.es/m/CA+TgmoZV8dw1H2bzZ9xkKwdrk8+XYa+DC9H=F7heO2zna5T6qg@mail.gmail.com
2020-04-03 20:59:47 +02:00
# A backup manifest is generated by default.
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
2014-04-15 03:33:46 +02:00
2018-04-07 23:45:39 +02:00
# Permissions on backup should be default
SKIP:
{
	skip "unix-style permissions not supported on Windows", 1
	  if ($windows_os);
	ok(check_mode_recursive("$tempdir/backup", 0700, 0600),
		"check backup dir permissions");
}
2023-12-20 15:49:12 +01:00
# Only archive_status and summaries directories should be copied in pg_wal/.
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# With -X none, pg_wal/ in the backup must contain only the
# archive_status and summaries directories, no WAL segments.
is_deeply(
	[ sort(slurp_dir("$tempdir/backup/pg_wal/")) ],
	[ sort qw(. .. archive_status summaries) ],
	'no WAL files copied');
2015-07-01 03:15:29 +02:00
2016-09-28 18:00:00 +02:00
# Contents of these directories should not be copied.
foreach my $dirname (
	qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans)
  )
{
	is_deeply(
		[ sort(slurp_dir("$tempdir/backup/$dirname/")) ],
		[ sort qw(. ..) ],
		"contents of $dirname/ not copied");
}

# These files should not be copied.
# NOTE(review): the qw() list had been garbled by inserted whitespace
# around 'global/pg_internal.init*'; restored to one entry per path.
foreach my $filename (
	qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp
	global/pg_internal.init global/pg_internal.init.123))
{
	ok(!-f "$tempdir/backup/$filename", "$filename not copied");
}
2024-02-13 13:47:12 +01:00
# We only test .DS_Store files being skipped on non-macOS systems
if ($Config{osname} ne 'darwin')
{
	ok(!-f "$tempdir/backup/.DS_Store", ".DS_Store not copied");
}

# Unlogged relation forks other than init should not be copied
ok(-f "$tempdir/backup/${baseUnloggedPath}_init",
	'unlogged init fork in backup');
ok(!-f "$tempdir/backup/$baseUnloggedPath",
	'unlogged main fork not in backup');

# Temp relations should not be copied.
foreach my $filename (@tempRelationFiles)
{
	ok(!-f "$tempdir/backup/base/$postgresOid/$filename",
		"base/$postgresOid/$filename not copied");
}
2016-09-28 18:00:00 +02:00
# Make sure existing backup_label was ignored.
isnt(slurp_file("$tempdir/backup/backup_label"),
	'DONOTCOPY', 'existing backup_label not copied');
rmtree("$tempdir/backup");

# Now delete the bogus backup_label file since it will interfere with startup
unlink("$pgdata/backup_label")
  or BAIL_OUT("unable to unlink $pgdata/backup_label");
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Take a backup with a separate WAL directory and no manifest.
$node->command_ok(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backup2", '--no-manifest',
		'--waldir', "$tempdir/xlog2"
	],
	'separate xlog directory');
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
Generate backup manifests for base backups, and validate them.
A manifest is a JSON document which includes (1) the file name, size,
last modification time, and an optional checksum for each file backed
up, (2) timelines and LSNs for whatever WAL will need to be replayed
to make the backup consistent, and (3) a checksum for the manifest
itself. By default, we use CRC-32C when checksumming data files,
because we are trying to detect corruption and user error, not foil an
adversary. However, pg_basebackup and the server-side BASE_BACKUP
command now have options to select a different algorithm, so users
wanting a cryptographic hash function can select SHA-224, SHA-256,
SHA-384, or SHA-512. Users not wanting file checksums at all can
disable them, or disable generating of the backup manifest altogether.
Using a cryptographic hash function in place of CRC-32C consumes
significantly more CPU cycles, which may slow down backups in some
cases.
A new tool called pg_validatebackup can validate a backup against the
manifest. If no checksums are present, it can still check that the
right files exist and that they have the expected sizes. If checksums
are present, it can also verify that each file has the expected
checksum. Additionally, it calls pg_waldump to verify that the
expected WAL files are present and parseable. Only plain format
backups can be validated directly, but tar format backups can be
validated after extracting them.
Robert Haas, with help, ideas, review, and testing from David Steele,
Stephen Frost, Andrew Dunstan, Rushabh Lathia, Suraj Kharage, Tushar
Ahuja, Rajkumar Raghuwanshi, Mark Dilger, Davinder Singh, Jeevan
Chalke, Amit Kapila, Andres Freund, and Noah Misch.
Discussion: http://postgr.es/m/CA+TgmoZV8dw1H2bzZ9xkKwdrk8+XYa+DC9H=F7heO2zna5T6qg@mail.gmail.com
2020-04-03 20:59:47 +02:00
# --no-manifest must suppress the manifest; --waldir must create the
# separate WAL directory.
ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed');
ok(-d "$tempdir/xlog2/", 'xlog directory was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
2014-04-15 03:33:46 +02:00
2022-01-18 00:40:00 +01:00
# Take a tar-format backup and verify the base tarball exists.
$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup", '-Ft' ],
	'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
rmtree("$tempdir/tarbackup");
2014-04-15 03:33:46 +02:00
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# -T requires both sides of the mapping to be non-empty.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ],
	'-T with empty old directory fails');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# -T requires both sides of the mapping to be non-empty.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ],
	'-T with empty new directory fails');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# A -T mapping must contain exactly one '=' separator.
$node->command_fails(
	[
		@pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp',
		"-T/foo=/bar=/baz"
	],
	'-T with multiple = fails');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Both directories of a -T mapping must be absolute paths.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar" ],
	'-T with old directory not absolute fails');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Both directories of a -T mapping must be absolute paths.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar" ],
	'-T with new directory not absolute fails');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# A -T argument without any '=' at all is malformed.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ],
	'-T with invalid format fails');
2015-02-24 19:41:07 +01:00
2015-07-29 18:17:02 +02:00
my $superlongname = "superlongname_" . ("x" x 100);

# Tar format doesn't support filenames longer than 100 bytes.
SKIP:
{
	my $superlongpath = "$pgdata/$superlongname";

	skip "File path too long", 1
	  if $windows_os && length($superlongpath) > 255;

	open my $file, '>', "$superlongpath"
	  or die "unable to create file $superlongpath";
	close $file;
	$node->command_fails(
		[ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l1", '-Ft' ],
		'pg_basebackup tar with long name fails');
	unlink "$superlongpath";
}
2015-07-29 18:17:02 +02:00
2020-07-16 20:48:37 +02:00
# The following tests are for symlinks.

# Move pg_replslot out of $pgdata and create a symlink to it.
$node->stop;

# Set umask so test directories and files are created with group permissions
umask(0027);

# Enable group permissions on PGDATA
chmod_recursive("$pgdata", 0750, 0640);

# Create a temporary directory in the system location.
my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;

# On Windows use the short location to avoid path length issues.
# Elsewhere use $tempdir to avoid file system boundary issues with moving.
my $tmploc = $windows_os ? $sys_tempdir : $tempdir;

rename("$pgdata/pg_replslot", "$tmploc/pg_replslot")
  or BAIL_OUT "could not move $pgdata/pg_replslot";
dir_symlink("$tmploc/pg_replslot", "$pgdata/pg_replslot")
  or BAIL_OUT "could not symlink to $pgdata/pg_replslot";

$node->start;
Add end-to-end testing of pg_basebackup's tar-format output.
The existing test script does run pg_basebackup with the -Ft option,
but it makes no real attempt to verify the sanity of the results.
We wouldn't know if the output is incompatible with standard "tar"
programs, nor if the server fails to start from the restored output.
Notably, this means that xlog.c's read_tablespace_map() is not being
meaningfully tested, since that code is used only in the tar-format
case. (We do have reasonable coverage of restoring from plain-format
output, though it's over in src/test/recovery not here.)
Hence, attempt to untar the output and start a server from it,
rather than just hoping it's OK.
This test assumes that the local "tar" has the "-C directory"
switch. Although that's not promised by POSIX, my research
suggests that all non-extinct tar implementations have it.
Should the buildfarm's opinion differ, we can complicate the
test a bit to avoid requiring that.
Possibly this should be back-patched, but I'm unsure about
whether it could work on Windows before d66b23b03.
2021-03-17 19:52:55 +01:00
# Test backup of a tablespace using tar format.

# Symlink the system located tempdir to our physical temp location.
# That way we can use shorter names for the tablespace directories,
# which hopefully won't run afoul of the 99 character length limit.
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;

mkdir "$tempdir/tblspc1";
my $realTsDir = "$real_sys_tempdir/tblspc1";
$node->safe_psql('postgres',
	"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
	"CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
	  . "INSERT INTO test1 VALUES (1234);");
$node->backup('tarbackup2', backup_options => ['-Ft']);
# empty test1, just so that it's different from the to-be-restored data
$node->safe_psql('postgres', "TRUNCATE TABLE test1;");

# basic checks on the output
my $backupdir = $node->backup_dir . '/tarbackup2';
ok(-f "$backupdir/base.tar", 'backup tar was created');
ok(-f "$backupdir/pg_wal.tar", 'WAL tar was created');
# Each tablespace produces one tarball named after its OID.
my @tblspc_tars = glob "$backupdir/[0-9]*.tar";
is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
# Try to verify the tar-format backup by restoring it.
# For this, we use the tar program identified by configure.
SKIP:
{
	my $tar = $ENV{TAR};

	# don't check for a working tar here, to accommodate various odd
	# cases. If tar doesn't work the init_from_backup below will fail.
	skip "no tar program available", 1
	  if (!defined $tar || $tar eq '');

	my $node2 = PostgreSQL::Test::Cluster->new('replica');

	# Recover the backup: extract the tablespace OID from the tar file
	# name and remap the tablespace into a fresh directory.
	$tblspc_tars[0] =~ m|/([0-9]*)\.tar$|;
	my $tblspcoid = $1;
	my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
	$node2->init_from_backup($node, 'tarbackup2', tar_program => $tar,
		'tablespace_map' => { $tblspcoid => $realRepTsDir });

	# The restored cluster must contain the pre-TRUNCATE data.
	$node2->start;
	my $result = $node2->safe_psql('postgres', 'SELECT * FROM test1');
	is($result, '1234', "tablespace data restored from tar-format backup");
	$node2->stop;
}
# Create an unlogged table to test that forks other than init are not copied.
$node->safe_psql('postgres',
	'CREATE UNLOGGED TABLE tblspc1_unlogged (id int) TABLESPACE tblspc1;');

my $tblspc1UnloggedPath = $node->safe_psql('postgres',
	q{select pg_relation_filepath('tblspc1_unlogged')});

# Make sure main and init forks exist
ok(-f "$pgdata/${tblspc1UnloggedPath}_init",
	'unlogged init fork in tablespace');
ok(-f "$pgdata/$tblspc1UnloggedPath", 'unlogged main fork in tablespace');

# Create files that look like temporary relations to ensure they are ignored
# in a tablespace.
@tempRelationFiles = qw(t888_888 t888888_888888_vm.1);

# Tablespace version directory name, derived from a file path inside it.
my $tblSpc1Id = basename(
	dirname(
		dirname(
			$node->safe_psql(
				'postgres', q{select pg_relation_filepath('test1')}))));

foreach my $filename (@tempRelationFiles)
{
	append_to_file(
		"$real_sys_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename",
		'TEMP_RELATION');
}

# Plain format requires -T when tablespaces are present.
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp' ],
	'plain format with tablespaces fails without tablespace mapping');

$node->command_ok(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backup1", '-Fp',
		"-T$realTsDir=$tempdir/tbackup/tblspc1",
	],
	'plain format with tablespaces succeeds with tablespace mapping');
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');

# This symlink check is not supported on Windows as -l
# doesn't work with junctions
SKIP:
{
	skip "symlink check not implemented on Windows", 1
	  if ($windows_os);
	opendir(my $dh, "$pgdata/pg_tblspc") or die;
	ok( (   grep {
				-l "$tempdir/backup1/pg_tblspc/$_"
				  and readlink "$tempdir/backup1/pg_tblspc/$_" eq
				  "$tempdir/tbackup/tblspc1"
			} readdir($dh)),
		"tablespace symlink was updated");
	closedir $dh;
}
# Group access should be enabled on all backup files
SKIP:
{
	skip "unix-style permissions not supported on Windows", 1
	  if ($windows_os);

	ok(check_mode_recursive("$tempdir/backup1", 0750, 0640),
		"check backup dir permissions");
}

# Unlogged relation forks other than init should not be copied.
# Keep the trailing dboid/spcver/relfilenode part of the path only.
my ($tblspc1UnloggedBackupPath) =
  $tblspc1UnloggedPath =~ /[^\/]*\/[^\/]*\/[^\/]*$/g;

ok(-f "$tempdir/tbackup/tblspc1/${tblspc1UnloggedBackupPath}_init",
	'unlogged init fork in tablespace backup');
ok(!-f "$tempdir/tbackup/tblspc1/$tblspc1UnloggedBackupPath",
	'unlogged main fork not in tablespace backup');

# Temp relations should not be copied.
foreach my $filename (@tempRelationFiles)
{
	ok(!-f "$tempdir/tbackup/tblspc1/$tblSpc1Id/$postgresOid/$filename",
		"[tblspc1]/$postgresOid/$filename not copied");

	# Also remove temp relation files or tablespace drop will fail.
	my $filepath =
	  "$real_sys_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename";

	unlink($filepath)
	  or BAIL_OUT("unable to unlink $filepath");
}
ok(-d "$tempdir/backup1/pg_replslot",
	'pg_replslot symlink copied as directory');
rmtree("$tempdir/backup1");

# A tablespace path containing '=' must be escapable in the -T mapping.
mkdir "$tempdir/tbl=spc2";
$realTsDir = "$real_sys_tempdir/tbl=spc2";
$node->safe_psql('postgres', "DROP TABLE test1;");
$node->safe_psql('postgres', "DROP TABLE tblspc1_unlogged;");
$node->safe_psql('postgres', "DROP TABLESPACE tblspc1;");
$node->safe_psql('postgres',
	"CREATE TABLESPACE tblspc2 LOCATION '$realTsDir';");
$realTsDir =~ s/=/\\=/;
$node->command_ok(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backup3", '-Fp',
		"-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
	],
	'mapping tablespace with = sign in path');
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc2;");
rmtree("$tempdir/backup3");

# Tar format must cope with a tablespace symlink target at the length limit.
mkdir "$tempdir/$superlongname";
$realTsDir = "$real_sys_tempdir/$superlongname";
$node->safe_psql('postgres',
	"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
	'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");

# -R should produce a standby.signal and recovery settings.
$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
	'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
rmtree("$tempdir/backupR");
my $port = $node->port;

# -R must have written a primary_conninfo pointing at our server port.
like(
	$recovery_conf,
	qr/^primary_conninfo = '.*port=$port.*'\n/m,
	'postgresql.auto.conf sets primary_conninfo');

# Default WAL mode (stream) should leave WAL segment files in pg_wal.
$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/backupxd" ],
	'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
	'WAL files copied');
rmtree("$tempdir/backupxd");
# Exercise each explicit -X mode: fetch, stream, stream with tar output,
# and stream without a replication slot.
$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
	'pg_basebackup -X fetch runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
	'WAL files copied');
rmtree("$tempdir/backupxf");

$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '-X', 'stream' ],
	'pg_basebackup -X stream runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
	'WAL files copied');
rmtree("$tempdir/backupxs");

$node->command_ok(
	[
		@pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
		'-Ft'
	],
	'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");

$node->command_ok(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupnoslot", '-X',
		'stream', '--no-slot'
	],
	'pg_basebackup -X stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
# Tests for the --target option: a backup target is mutually exclusive
# with WAL streaming, an output directory, and an explicit format.
$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
	'pg_basebackup -X fetch runs');

$node->command_fails_like(
	[ @pg_basebackup_defs, '--target', 'blackhole' ],
	qr/WAL cannot be streamed when a backup target is specified/,
	'backup target requires -X');
$node->command_fails_like(
	[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'stream' ],
	qr/WAL cannot be streamed when a backup target is specified/,
	'backup target requires -X other than -X stream');
$node->command_fails_like(
	[ @pg_basebackup_defs, '--target', 'bogus', '-X', 'none' ],
	qr/unrecognized target/,
	'backup target unrecognized');
$node->command_fails_like(
	[
		@pg_basebackup_defs, '--target', 'blackhole', '-X',
		'none', '-D', "$tempdir/blackhole"
	],
	qr/cannot specify both output directory and backup target/,
	'backup target and output directory');
# Fixed test description: this checks the format/target conflict, but the
# description duplicated the previous test's 'backup target and output
# directory', making TAP output ambiguous.
$node->command_fails_like(
	[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-Ft' ],
	qr/cannot specify both format and backup target/,
	'backup target and format');
$node->command_ok(
	[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
	'backup target blackhole');
# The 'server' target writes the backup on the server side; check that
# the tar file actually appears at the requested server path.
$node->command_ok(
	[
		@pg_basebackup_defs, '--target',
		"server:$tempdir/backuponserver", '-X',
		'none'
	],
	'backup target server');
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
rmtree("$tempdir/backuponserver");
2022-02-02 19:50:33 +01:00
# A non-superuser with REPLICATION and pg_write_server_files should be
# able to use the server backup target as well.
$node->command_ok(
	[ qw(createuser --replication --role=pg_write_server_files backupuser) ],
	'create backup user');
$node->command_ok(
	[
		@pg_basebackup_defs, '-U', 'backupuser', '--target',
		"server:$tempdir/backuponserver",
		'-X', 'none'
	],
	'backup target server');
ok(-f "$tempdir/backuponserver/base.tar",
	'backup tar was created as non-superuser');
rmtree("$tempdir/backuponserver");
Support base backup targets.
pg_basebackup now has a --target=TARGET[:DETAIL] option. If specified,
it is sent to the server as the value of the TARGET option to the
BASE_BACKUP command. If DETAIL is included, it is sent as the value of
the new TARGET_DETAIL option to the BASE_BACKUP command. If the
target is anything other than 'client', pg_basebackup assumes that it
will now be the server's job to write the backup in a location somehow
defined by the target, and that it therefore needs to write nothing
locally. However, the server will still send messages to the client
for progress reporting purposes.
On the server side, we now support two additional types of backup
targets. There is a 'blackhole' target, which just throws away the
backup data without doing anything at all with it. Naturally, this
should only be used for testing and debugging purposes, since you will
not actually have a backup when it finishes running. More usefully,
there is also a 'server' target, so you can now use something like
'pg_basebackup -Xnone -t server:/SOME/PATH' to write a backup to some
location on the server. We can extend this to more types of targets
in the future, and might even want to create an extensibility
mechanism for adding new target types.
Since WAL fetching is handled with separate client-side logic, it's
not part of this mechanism; thus, backups with non-default targets
must use -Xnone or -Xfetch.
Patch by me, with a bug fix by Jeevan Ladhe. The patch set of which
this is a part has also had review and/or testing from Tushar Ahuja,
Suraj Kharage, Dipesh Pandit, and Mark Dilger.
Discussion: http://postgr.es/m/CA+TgmoaYZbz0=Yk797aOJwkGJC-LK3iXn+wzzMx7KdwNpZhS5g@mail.gmail.com
2021-11-16 21:20:50 +01:00
# Replication-slot option validation: a nonexistent slot, -C without a
# slot name, and the contradictory -C -S --no-slot combination must all
# be rejected.
$node->command_fails(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupxs_sl_fail", '-X',
		'stream', '-S',
		'slot0'
	],
	'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
	'pg_basebackup -C fails without slot name');
$node->command_fails(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupxs_slot", '-C',
		'-S', 'slot0',
		'--no-slot'
	],
	'pg_basebackup fails with -C -S --no-slot');
$node->command_fails_like(
	[
		@pg_basebackup_defs, '--target', 'blackhole', '-D',
		"$tempdir/blackhole"
	],
	qr/cannot specify both output directory and backup target/,
	'backup target and output directory');

# -X none with a plain output directory is fine.
$node->command_ok(
	[ @pg_basebackup_defs, '-D', "$tempdir/backuptr/co", '-X', 'none' ],
	'pg_basebackup -X none runs');
2015-07-22 03:06:45 +02:00
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Repeat the slot-option failure cases, then exercise successful slot
# creation with -C and verify the slot's catalog state.
$node->command_fails(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupxs_sl_fail", '-X',
		'stream', '-S',
		'slot0'
	],
	'pg_basebackup fails with nonexistent replication slot');

$node->command_fails(
	[ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
	'pg_basebackup -C fails without slot name');

$node->command_fails(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupxs_slot", '-C',
		'-S', 'slot0',
		'--no-slot'
	],
	'pg_basebackup fails with -C -S --no-slot');

$node->command_ok(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupxs_slot", '-C',
		'-S', 'slot0'
	],
	'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");

# The slot created by -C must exist and have a non-null restart LSN,
# because -X stream reserved WAL through it.
is( $node->safe_psql(
		'postgres',
		q{SELECT slot_name FROM pg_replication_slots WHERE slot_name = 'slot0'}
	),
	'slot0',
	'replication slot was created');
isnt(
	$node->safe_psql(
		'postgres',
		q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot0'}
	),
	'',
	'restart LSN of new slot is not null');

# Creating a slot that already exists must fail.
$node->command_fails(
	[
		@pg_basebackup_defs, '-D',
		"$tempdir/backupxs_slot1", '-C',
		'-S', 'slot0'
	],
	'pg_basebackup fails with -C -S and a previously existing slot');
2016-03-03 21:58:30 +01:00
# Create a physical slot manually; a fresh slot has a null restart LSN
# until something streams through it.
$node->safe_psql('postgres',
	q{SELECT * FROM pg_create_physical_replication_slot('slot1')});
my $lsn = $node->safe_psql('postgres',
	q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
);
is($lsn, '', 'restart LSN of new slot is null');

# Using a slot only makes sense when WAL is streamed.
$node->command_fails(
	[
		@pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
		'slot1', '-X', 'none'
	],
	'pg_basebackup with replication slot fails without WAL streaming');
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# Streaming through the manually created slot should advance its
# restart LSN to a valid value of the form X/XXXXXXX.
$node->command_ok(
	[
		@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
		'stream', '-S', 'slot1'
	],
	'pg_basebackup -X stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
	q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
);
like($lsn, qr!^0/[0-9A-Z]{7,8}$!, 'restart LSN of slot has advanced');
rmtree("$tempdir/backupxs_sl");
2015-07-22 03:06:45 +02:00
Refactor Perl test code
The original code was a bit clunky; make it more amenable for further
reuse by creating a new Perl package PostgresNode, which is an
object-oriented representation of a single server, with some support
routines such as init, start, stop, psql. This serves as a better basis
on which to build further test code, and enables writing tests that use
more than one server without too much complication.
This commit modifies a lot of the existing test files, mostly to remove
explicit calls to system commands (pg_ctl) replacing them with method
calls of a PostgresNode object. The result is quite a bit more
straightforward.
Also move some initialization code to BEGIN and INIT blocks instead of
having it straight in as top-level code.
This commit also introduces package RecursiveCopy so that we can copy
whole directories without having to depend on packages that may not be
present on vanilla Perl 5.8 installations.
I also ran perltidy on the modified files, which changes some code sites
that are not otherwise touched by this patch. I tried to avoid this,
but it ended up being more trouble than it's worth.
Authors: Michael Paquier, Álvaro Herrera
Review: Noah Misch
2015-12-02 22:46:16 +01:00
# -R should record the slot name in postgresql.auto.conf of the backup.
$node->command_ok(
	[
		@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
		'stream', '-S', 'slot1', '-R',
	],
	'pg_basebackup with replication slot and -R runs');
like(
	slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"),
	qr/^primary_slot_name = 'slot1'\n/m,
	'recovery conf file sets primary_slot_name');

# The following corruption tests rely on data checksums being enabled.
my $checksum = $node->safe_psql('postgres', 'SHOW data_checksums;');
is($checksum, 'on', 'checksums are enabled');
rmtree("$tempdir/backupxs_sl_R");
2018-04-03 13:47:16 +02:00
2024-03-21 06:18:59 +01:00
# A dbname given via -d should be carried into the generated
# primary_conninfo when -R is used.
$node->command_ok(
	[
		@pg_basebackup_defs, '-D', "$tempdir/backup_dbname_R", '-X',
		'stream', '-d', "dbname=db1", '-R',
	],
	'pg_basebackup with dbname and -R runs');
like(
	slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
	qr/dbname=db1/m,
	'recovery conf file sets dbname');
rmtree("$tempdir/backup_dbname_R");
2018-04-04 11:35:48 +02:00
# create tables to corrupt and get their relfilenodes
# (autovacuum is disabled so background activity cannot rewrite the
# pages we are about to corrupt)
my $file_corrupt1 = $node->safe_psql('postgres',
	q{CREATE TABLE corrupt1 AS SELECT a FROM generate_series(1,10000) AS a; ALTER TABLE corrupt1 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt1')}
);
my $file_corrupt2 = $node->safe_psql('postgres',
	q{CREATE TABLE corrupt2 AS SELECT b FROM generate_series(1,2) AS b; ALTER TABLE corrupt2 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt2')}
);
Harden TAP tests that intentionally corrupt page checksums.
The previous method for doing that was to write zeroes into a
predetermined set of page locations. However, there's a roughly
1-in-64K chance that the existing checksum will match by chance,
and yesterday several buildfarm animals started to reproducibly
see that, resulting in test failures because no checksum mismatch
was reported.
Since the checksum includes the page LSN, test success depends on
the length of the installation's WAL history, which is affected by
(at least) the initial catalog contents, the set of locales installed
on the system, and the length of the pathname of the test directory.
Sooner or later we were going to hit a chance match, and today is
that day.
Harden these tests by specifically inverting the checksum field and
leaving all else alone, thereby guaranteeing that the checksum is
incorrect.
In passing, fix places that were using seek() to set up for syswrite(),
a combination that the Perl docs very explicitly warn against. We've
probably escaped problems because no regular buffered I/O is done on
these filehandles; but if it ever breaks, we wouldn't deserve or get
much sympathy.
Although we've only seen problems in HEAD, now that we recognize the
environmental dependencies it seems like it might be just a matter
of time until someone manages to hit this in back-branch testing.
Hence, back-patch to v11 where we started doing this kind of test.
Discussion: https://postgr.es/m/3192026.1648185780@sss.pgh.pa.us
2022-03-25 19:23:26 +01:00
# get block size for corruption steps
my $block_size = $node->safe_psql('postgres', 'SHOW block_size;');

# induce corruption; the server must be stopped while the page checksum
# is inverted so the change is not overwritten from shared buffers
$node->stop;
$node->corrupt_page_checksum($file_corrupt1, 0);
$node->start;

$node->command_checks_all(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt" ],
	1,
	[qr{^$}],
	[qr/^WARNING.*checksum verification failed/s],
	'pg_basebackup reports checksum mismatch');
rmtree("$tempdir/backup_corrupt");
2018-04-03 13:47:16 +02:00
# induce further corruption in 5 more blocks
$node->stop;
for my $i (1 .. 5)
{
	$node->corrupt_page_checksum($file_corrupt1, $i * $block_size);
}
$node->start;

# Per-file checksum warnings are capped; after 5 failures in one file
# pg_basebackup stops reporting individual mismatches.
$node->command_checks_all(
	[ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt2" ],
	1,
	[qr{^$}],
	[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
	'pg_basebackup does not report more than 5 checksum mismatches');
rmtree("$tempdir/backup_corrupt2");
2018-04-03 13:47:16 +02:00
# induce corruption in a second file
Harden TAP tests that intentionally corrupt page checksums.
The previous method for doing that was to write zeroes into a
predetermined set of page locations. However, there's a roughly
1-in-64K chance that the existing checksum will match by chance,
and yesterday several buildfarm animals started to reproducibly
see that, resulting in test failures because no checksum mismatch
was reported.
Since the checksum includes the page LSN, test success depends on
the length of the installation's WAL history, which is affected by
(at least) the initial catalog contents, the set of locales installed
on the system, and the length of the pathname of the test directory.
Sooner or later we were going to hit a chance match, and today is
that day.
Harden these tests by specifically inverting the checksum field and
leaving all else alone, thereby guaranteeing that the checksum is
incorrect.
In passing, fix places that were using seek() to set up for syswrite(),
a combination that the Perl docs very explicitly warn against. We've
probably escaped problems because no regular buffered I/O is done on
these filehandles; but if it ever breaks, we wouldn't deserve or get
much sympathy.
Although we've only seen problems in HEAD, now that we recognize the
environmental dependencies it seems like it might be just a matter
of time until someone manages to hit this in back-branch testing.
Hence, back-patch to v11 where we started doing this kind of test.
Discussion: https://postgr.es/m/3192026.1648185780@sss.pgh.pa.us
2022-03-25 19:23:26 +01:00
$ node - > stop ;
$ node - > corrupt_page_checksum ( $ file_corrupt2 , 0 ) ;
$ node - > start ;
2018-04-03 13:47:16 +02:00
$ node - > command_checks_all (
2022-01-18 00:40:00 +01:00
[ @ pg_basebackup_defs , '-D' , "$tempdir/backup_corrupt3" ] ,
2018-04-03 13:47:16 +02:00
1 ,
[ qr{ ^$ } ] ,
[ qr/^WARNING.*7 total checksum verification failures/ s ] ,
'pg_basebackup correctly report the total number of checksum mismatches' ) ;
2018-04-06 22:26:31 +02:00
rmtree ( "$tempdir/backup_corrupt3" ) ;
2018-04-03 13:47:16 +02:00
# When checksum verification is explicitly disabled, the backup of the
# corrupted cluster must succeed and report nothing.
$node->command_ok(
	[
		@pg_basebackup_defs, '-D', "$tempdir/backup_corrupt4",
		'--no-verify-checksums',
	],
	'pg_basebackup with -k does not report checksum mismatch');
rmtree("$tempdir/backup_corrupt4");
# Clean up the deliberately-corrupted relations so later backups in this
# script see a healthy cluster.
$node->safe_psql('postgres', "DROP TABLE corrupt1;");
$node->safe_psql('postgres', "DROP TABLE corrupt2;");
note "Testing pg_basebackup with compression methods";

# Check ZLIB compression if available.  The skip count of 7 matches the
# three command_ok calls, the three file-count checks, and the single
# gzip integrity check below.
SKIP:
{
	skip "postgres was not built with ZLIB support", 7
	  if (!check_pg_config("#define HAVE_LIBZ 1"));

	# Exercise the three equivalent ways of requesting gzip compression
	# of a tar-format backup.
	$node->command_ok(
		[
			@pg_basebackup_defs, '-D', "$tempdir/backup_gzip",
			'--compress', '1', '--format', 't'
		],
		'pg_basebackup with --compress');
	$node->command_ok(
		[
			@pg_basebackup_defs, '-D', "$tempdir/backup_gzip2",
			'--gzip', '--format', 't'
		],
		'pg_basebackup with --gzip');
	$node->command_ok(
		[
			@pg_basebackup_defs, '-D', "$tempdir/backup_gzip3",
			'--compress', 'gzip:1', '--format', 't'
		],
		'pg_basebackup with --compress=gzip:1');

	# Verify that the stored files are generated with their expected
	# names: each backup directory should hold exactly base.tar.gz and
	# pg_wal.tar.gz.
	my @zlib_files = glob "$tempdir/backup_gzip/*.tar.gz";
	is(scalar(@zlib_files), 2,
		"two files created with --compress=NUM (base.tar.gz and pg_wal.tar.gz)"
	);
	my @zlib_files2 = glob "$tempdir/backup_gzip2/*.tar.gz";
	is(scalar(@zlib_files2), 2,
		"two files created with --gzip (base.tar.gz and pg_wal.tar.gz)");
	my @zlib_files3 = glob "$tempdir/backup_gzip3/*.tar.gz";
	is(scalar(@zlib_files3), 2,
		"two files created with --compress=gzip:NUM (base.tar.gz and pg_wal.tar.gz)"
	);

	# Check the integrity of the files generated, but only when a gzip
	# binary has been provided by the environment.
	my $gzip = $ENV{GZIP_PROGRAM};
	skip "program gzip is not found in your system", 1
	  if (!defined $gzip || $gzip eq '');

	my $gzip_is_valid =
	  system_log($gzip, '--test', @zlib_files, @zlib_files2, @zlib_files3);
	is($gzip_is_valid, 0, "gzip verified the integrity of compressed data");
	rmtree("$tempdir/backup_gzip");
	rmtree("$tempdir/backup_gzip2");
	rmtree("$tempdir/backup_gzip3");
}
2022-02-23 14:24:43 +01:00
# Test background stream process terminating before the basebackup has
# finished, the main process should exit gracefully with an error message on
# stderr. To reduce the risk of timing related issues we invoke the base
# backup with rate throttling enabled.
$ node - > safe_psql ( 'postgres' ,
q{ CREATE TABLE t AS SELECT a FROM generate_series(1,10000) AS a; } ) ;
2022-04-16 08:15:38 +02:00
my $ sigchld_bb_timeout =
IPC::Run:: timer ( $ PostgreSQL:: Test:: Utils:: timeout_default ) ;
2022-02-23 14:24:43 +01:00
my ( $ sigchld_bb_stdin , $ sigchld_bb_stdout , $ sigchld_bb_stderr ) = ( '' , '' , '' ) ;
my $ sigchld_bb = IPC::Run:: start (
[
@ pg_basebackup_defs , '--wal-method=stream' ,
'-D' , "$tempdir/sigchld" ,
'--max-rate=32' , '-d' ,
$ node - > connstr ( 'postgres' )
] ,
'<' ,
\ $ sigchld_bb_stdin ,
'>' ,
\ $ sigchld_bb_stdout ,
'2>' ,
\ $ sigchld_bb_stderr ,
$ sigchld_bb_timeout ) ;
is ( $ node - > poll_query_until (
'postgres' ,
"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
. "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' "
. "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'" ) ,
"1" ,
"Walsender killed" ) ;
ok ( pump_until (
$ sigchld_bb , $ sigchld_bb_timeout ,
\ $ sigchld_bb_stderr , qr/background process terminated unexpectedly/ ) ,
'background process exit message' ) ;
$ sigchld_bb - > finish ( ) ;
2022-07-22 07:37:39 +02:00
# Test that we can back up an in-place tablespace (LOCATION '' with
# allow_in_place_tablespaces enabled).
$node->safe_psql('postgres',
	"SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"
);
$node->safe_psql('postgres',
	"CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
	  . "INSERT INTO test2 VALUES (1234);");
my $tblspc_oid = $node->safe_psql('postgres',
	"SELECT oid FROM pg_tablespace WHERE spcname = 'tblspc2';");
$node->backup('backup3');
$node->safe_psql('postgres', "DROP TABLE test2;");
$node->safe_psql('postgres', "DROP TABLESPACE tblspc2;");

# Check that the in-place tablespace made it into the backup: exactly one
# PG_* version directory under the tablespace's OID.
$backupdir = $node->backup_dir . '/backup3';
my @dst_tblspc = glob "$backupdir/pg_tblspc/$tblspc_oid/PG_*";
is(@dst_tblspc, 1, 'tblspc directory copied');
# Can't take an incremental backup using a manifest from a different
# cluster.
#
# Set up another new database instance with the force_initdb option.  We
# don't want to initialize this one by copying the initdb template,
# because it must be a separate cluster with a different system ID for
# the manifest mismatch to be detectable.
my $node2 = PostgreSQL::Test::Cluster->new('node2');
$node2->init(force_initdb => 1, has_archiving => 1, allows_streaming => 1);
$node2->append_conf('postgresql.conf', 'summarize_wal = on');
$node2->start;

$node2->command_fails_like(
	[
		@pg_basebackup_defs, '-D', "$tempdir" . '/diff_sysid',
		'--incremental', "$backupdir" . '/backup_manifest'
	],
	qr/manifest system identifier is .*, but database system identifier is/,
	"pg_basebackup fails with different database system manifest");

done_testing();