Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

The pgindent part of this is pretty small, consisting mainly of
fixing up self-inflicted formatting damage from patches that
hadn't bothered to add their new typedefs to typedefs.list.
In order to keep it from making anything worse, I manually added
a dozen or so typedefs that appeared in the existing typedefs.list
but not in the buildfarm's list.  Perhaps we should formalize that,
or, better, find a way to get those typedefs into the automatic list.

pgperltidy is as opinionated as always, and reformat-dat-files too.
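
As a rough illustration of the manual comparison described above (not part
of this commit), a small Perl sketch could report entries that are present
in the in-tree src/tools/pgindent/typedefs.list but missing from a
separately downloaded copy of the buildfarm's typedef list.  The script
name, file paths, and the separate download step are assumptions here,
purely for illustration:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Hypothetical helper, not part of this commit: list typedefs that appear
    # in the in-tree typedefs.list but not in a buildfarm typedefs list.
    my ($intree, $buildfarm) = @ARGV;
    die "usage: $0 typedefs.list buildfarm-typedefs.list\n"
      unless defined $buildfarm;

    my %in_buildfarm;
    open my $bf, '<', $buildfarm or die "$buildfarm: $!";
    while (<$bf>) { chomp; $in_buildfarm{$_} = 1 if length; }
    close $bf;

    open my $tl, '<', $intree or die "$intree: $!";
    while (<$tl>)
    {
        chomp;
        print "$_\n" if length && !$in_buildfarm{$_};
    }
    close $tl;

Something like "perl compare_typedefs.pl src/tools/pgindent/typedefs.list
/tmp/buildfarm-typedefs.list" (hypothetical file names) would then print the
entries that had to be re-added by hand.
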
Author: Tom Lane
Date:   2024-05-14 16:34:50 -04:00
Parent: 3ddbac368c
Commit: da256a4a7f

60 changed files with 969 additions and 689 deletions


@@ -50,7 +50,7 @@ typedef struct
     * command. Elsewhere (including the case of default) NULL.
     */
    const char *createdb_dtemplate;
} sepgsql_context_info_t;

static sepgsql_context_info_t sepgsql_context_info;


@@ -67,7 +67,7 @@ typedef struct
{
    SubTransactionId subid;
    char *label;
} pending_label;

/*
 * sepgsql_get_client_label


@@ -44,7 +44,7 @@ typedef struct
    /* true, if tcontext is valid */
    char *ncontext;    /* temporary scontext on execution of trusted
                        * procedure, or NULL elsewhere */
} avc_cache;

/*
 * Declaration of static variables


@@ -315,72 +315,72 @@ sub ParseData
    my $catname = $1;
    my $data = [];

    # Scan the input file.
    while (<$ifd>)
    {
        my $hash_ref;

        if (/{/)
        {
            # Capture the hash ref
            # NB: Assumes that the next hash ref can't start on the
            # same line where the present one ended.
            # Not foolproof, but we shouldn't need a full parser,
            # since we expect relatively well-behaved input.

            # Quick hack to detect when we have a full hash ref to
            # parse. We can't just use a regex because of values in
            # pg_aggregate and pg_proc like '{0,0}'. This will need
            # work if we ever need to allow unbalanced braces within
            # a field value.
            my $lcnt = tr/{//;
            my $rcnt = tr/}//;

            if ($lcnt == $rcnt)
            {
                # We're treating the input line as a piece of Perl, so we
                # need to use string eval here. Tell perlcritic we know what
                # we're doing.
                eval "\$hash_ref = $_";    ## no critic (ProhibitStringyEval)

                if (!ref $hash_ref)
                {
                    die "$input_file: error parsing line $.:\n$_\n";
                }

                # Annotate each hash with the source line number.
                $hash_ref->{line_number} = $.;

                # Expand tuples to their full representation.
                AddDefaultValues($hash_ref, $schema, $catname);
            }
            else
            {
                my $next_line = <$ifd>;
                die "$input_file: file ends within Perl hash\n"
                  if !defined $next_line;
                $_ .= $next_line;
                redo;
            }
        }

        # If we found a hash reference, keep it, unless it is marked as
        # autogenerated; in that case it'd duplicate an entry we'll
        # autogenerate below. (This makes it safe for reformat_dat_file.pl
        # with --full-tuples to print autogenerated entries, which seems like
        # useful behavior for debugging.)
        #
        # Otherwise, we have a non-data string, which we keep only if
        # the caller requested it.
        if (defined $hash_ref)
        {
            push @$data, $hash_ref if !$hash_ref->{autogenerated};
        }
        else
        {
            push @$data, $_ if $preserve_comments;
        }
    }

    close $ifd;

    # If this is pg_type, auto-generate array types too.
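
The brace-counting approach described in the comments above can be shown in
isolation.  The following standalone sketch (hypothetical input handling via
STDIN; not the actual Catalog.pm code) accumulates lines until the braces
balance and then string-evals the accumulated text as a hash reference:

    use strict;
    use warnings;

    while (my $line = <STDIN>)
    {
        next unless $line =~ /{/;

        # Keep appending input lines until '{' and '}' counts match.
        while (($line =~ tr/{//) != ($line =~ tr/}//))
        {
            my $next = <STDIN>;
            die "input ended inside hash\n" unless defined $next;
            $line .= $next;
        }

        # Evaluate the balanced text as Perl to obtain the hash reference.
        my $hash_ref;
        eval "\$hash_ref = $line";    ## no critic (ProhibitStringyEval)
        die "could not parse: $line" unless ref $hash_ref;
        print "parsed entry with ", scalar(keys %$hash_ref), " fields\n";
    }
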


@@ -302,9 +302,7 @@ $node->safe_psql(
    ));

$node->command_checks_all(
    [ 'pg_amcheck', '-d', 'regression_invalid' ],
    1,
    [qr/^$/],
    [
@@ -314,8 +312,7 @@ $node->command_checks_all(
$node->command_checks_all(
    [
        'pg_amcheck', '-d', 'postgres', '-t', 'regression_invalid.public.foo',
    ],
    1,
    [qr/^$/],


@@ -411,7 +411,9 @@ SKIP:
    $tblspc_tars[0] =~ m|/([0-9]*)\.tar$|;
    my $tblspcoid = $1;
    my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
    $node2->init_from_backup(
        $node, 'tarbackup2',
        tar_program => $tar,
        'tablespace_map' => { $tblspcoid => $realRepTsDir });
    $node2->start;
@@ -776,10 +778,8 @@ $node->command_ok(
        'stream', '-d', "dbname=db1", '-R',
    ],
    'pg_basebackup with dbname and -R runs');
like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
    qr/dbname=db1/m, 'recovery conf file sets dbname');

rmtree("$tempdir/backup_dbname_R");
@@ -976,8 +976,11 @@ $node2->append_conf('postgresql.conf', 'summarize_wal = on');
$node2->start;

$node2->command_fails_like(
    [
        @pg_basebackup_defs, '-D',
        "$tempdir" . '/diff_sysid', '--incremental',
        "$backupdir" . '/backup_manifest'
    ],
    qr/manifest system identifier is .*, but database system identifier is/,
    "pg_basebackup fails with different database system manifest");


@@ -140,11 +140,11 @@ command_fails(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_t->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_t->host, '--subscriber-port',
        $node_t->port, '--database',
        'pg1', '--database',
        'pg2'
    ],
    'target server is not in recovery');
@@ -154,11 +154,11 @@ command_fails(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_s->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_s->host, '--subscriber-port',
        $node_s->port, '--database',
        'pg1', '--database',
        'pg2'
    ],
    'standby is up and running');
@@ -188,11 +188,11 @@ command_fails(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_c->data_dir, '--publisher-server',
        $node_s->connstr('pg1'), '--socket-directory',
        $node_c->host, '--subscriber-port',
        $node_c->port, '--database',
        'pg1', '--database',
        'pg2'
    ],
    'primary server is in recovery');
@@ -201,7 +201,8 @@ $node_p->safe_psql('pg1', "INSERT INTO tbl1 VALUES('second row')");
$node_p->wait_for_replay_catchup($node_s);

# Check some unmet conditions on node P
$node_p->append_conf(
    'postgresql.conf', q{
wal_level = replica
max_replication_slots = 1
max_wal_senders = 1
@@ -214,16 +215,17 @@ command_fails(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_s->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_s->host, '--subscriber-port',
        $node_s->port, '--database',
        'pg1', '--database',
        'pg2'
    ],
    'primary contains unmet conditions on node P');

# Restore default settings here but only apply it after testing standby. Some
# standby settings should not be a lower setting than on the primary.
$node_p->append_conf(
    'postgresql.conf', q{
wal_level = logical
max_replication_slots = 10
max_wal_senders = 10
@@ -231,7 +233,8 @@ max_worker_processes = 8
});

# Check some unmet conditions on node S
$node_s->append_conf(
    'postgresql.conf', q{
max_replication_slots = 1
max_logical_replication_workers = 1
max_worker_processes = 2
@@ -241,14 +244,15 @@ command_fails(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_s->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_s->host, '--subscriber-port',
        $node_s->port, '--database',
        'pg1', '--database',
        'pg2'
    ],
    'standby contains unmet conditions on node S');

$node_s->append_conf(
    'postgresql.conf', q{
max_replication_slots = 10
max_logical_replication_workers = 4
max_worker_processes = 8
@@ -262,15 +266,15 @@ command_ok(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_s->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_s->host, '--subscriber-port',
        $node_s->port, '--publication',
        'pub1', '--publication',
        'pub2', '--subscription',
        'sub1', '--subscription',
        'sub2', '--database',
        'pg1', '--database',
        'pg2'
    ],
    'run pg_createsubscriber --dry-run on node S');
@@ -286,10 +290,10 @@ command_ok(
        'pg_createsubscriber', '--verbose',
        '--dry-run', '--pgdata',
        $node_s->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_s->host, '--subscriber-port',
        $node_s->port, '--replication-slot',
        'replslot1'
    ],
    'run pg_createsubscriber without --databases');
@@ -299,15 +303,15 @@ command_ok(
        'pg_createsubscriber', '--verbose',
        '--verbose', '--pgdata',
        $node_s->data_dir, '--publisher-server',
        $node_p->connstr('pg1'), '--socket-directory',
        $node_s->host, '--subscriber-port',
        $node_s->port, '--publication',
        'pub1', '--publication',
        'Pub2', '--replication-slot',
        'replslot1', '--replication-slot',
        'replslot2', '--database',
        'pg1', '--database',
        'pg2'
    ],
    'run pg_createsubscriber on node S');


@@ -119,7 +119,7 @@ append_to_file "$pgdata/global/pg_internal.init.123", "foo";
# Only perform this test on non-macOS systems though as creating incorrect
# system files may have side effects on macOS.
append_to_file "$pgdata/global/.DS_Store", "foo"
  unless ($Config{osname} eq 'darwin');

# Enable checksums.
command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],


@@ -44,7 +44,7 @@ EOM
# Read list of tablespace OIDs. There should be just one.
my @tsoids = grep { /^\d+/ } slurp_dir($primary->data_dir . '/pg_tblspc');
is(0 + @tsoids, 1, "exactly one user-defined tablespace");
my $tsoid = $tsoids[0];

# Take a full backup.
@@ -52,8 +52,12 @@ my $backup1path = $primary->backup_dir . '/backup1';
my $tsbackup1path = $tempdir . '/ts1backup';
mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!";
$primary->command_ok(
    [
        'pg_basebackup', '-D',
        $backup1path, '--no-sync',
        '-cfast', "-T${tsprimary}=${tsbackup1path}"
    ],
    "full backup");

# Now make some database changes.
$primary->safe_psql('postgres', <<EOM);
@@ -79,9 +83,12 @@ my $backup2path = $primary->backup_dir . '/backup2';
my $tsbackup2path = $tempdir . '/tsbackup2';
mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!";
$primary->command_ok(
    [
        'pg_basebackup', '-D',
        $backup2path, '--no-sync',
        '-cfast', "-T${tsprimary}=${tsbackup2path}",
        '--incremental', $backup1path . '/backup_manifest'
    ],
    "incremental backup");

# Find an LSN to which either backup can be recovered.
@@ -105,10 +112,13 @@ $primary->poll_query_until('postgres', $archive_wait_query)
# choose the same timeline.
my $tspitr1path = $tempdir . '/tspitr1';
my $pitr1 = PostgreSQL::Test::Cluster->new('pitr1');
$pitr1->init_from_backup(
    $primary, 'backup1',
    standby => 1,
    has_restoring => 1,
    tablespace_map => { $tsoid => $tspitr1path });
$pitr1->append_conf(
    'postgresql.conf', qq{
recovery_target_lsn = '$lsn'
recovery_target_action = 'promote'
archive_mode = 'off'
@@ -119,11 +129,14 @@ $pitr1->start();
# basic configuration as before.
my $tspitr2path = $tempdir . '/tspitr2';
my $pitr2 = PostgreSQL::Test::Cluster->new('pitr2');
$pitr2->init_from_backup(
    $primary, 'backup2',
    standby => 1,
    has_restoring => 1,
    combine_with_prior => ['backup1'],
    tablespace_map => { $tsbackup2path => $tspitr2path });
$pitr2->append_conf(
    'postgresql.conf', qq{
recovery_target_lsn = '$lsn'
recovery_target_action = 'promote'
archive_mode = 'off'
@@ -131,11 +144,9 @@ archive_mode = 'off'
$pitr2->start();

# Wait until both servers exit recovery.
$pitr1->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery();")
  or die "Timed out while waiting apply to reach LSN $lsn";
$pitr2->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery();")
  or die "Timed out while waiting apply to reach LSN $lsn";

# Perform a logical dump of each server, and check that they match.
@@ -150,14 +161,20 @@ $pitr2->poll_query_until('postgres',
my $backupdir = $primary->backup_dir;
my $dump1 = $backupdir . '/pitr1.dump';
my $dump2 = $backupdir . '/pitr2.dump';
$pitr1->command_ok(
    [
        'pg_dumpall', '-f',
        $dump1, '--no-sync',
        '--no-unlogged-table-data', '-d',
        $pitr1->connstr('postgres'),
    ],
    'dump from PITR 1');
$pitr1->command_ok(
    [
        'pg_dumpall', '-f',
        $dump2, '--no-sync',
        '--no-unlogged-table-data', '-d',
        $pitr1->connstr('postgres'),
    ],
    'dump from PITR 2');
@@ -171,7 +188,7 @@ is($compare_res, 0, "dumps are identical");
if ($compare_res != 0)
{
    my ($stdout, $stderr) =
      run_command([ 'diff', '-u', $dump1, $dump2 ]);
    print "=== diff of $dump1 and $dump2\n";
    print "=== stdout ===\n";
    print $stdout;


@@ -36,14 +36,16 @@ EOM
# Now take an incremental backup.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
    [
        'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
        '--incremental', $backup1path . '/backup_manifest'
    ],
    "incremental backup from node1");

# Restore the incremental backup and use it to create a new node.
my $node2 = PostgreSQL::Test::Cluster->new('node2');
$node2->init_from_backup($node1, 'backup2',
    combine_with_prior => ['backup1']);
$node2->start();

# Insert rows on both nodes.
@@ -57,14 +59,16 @@ EOM
# Take another incremental backup, from node2, based on backup2 from node1.
my $backup3path = $node1->backup_dir . '/backup3';
$node2->command_ok(
    [
        'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
        '--incremental', $backup2path . '/backup_manifest'
    ],
    "incremental backup from node2");

# Restore the incremental backup and use it to create a new node.
my $node3 = PostgreSQL::Test::Cluster->new('node3');
$node3->init_from_backup($node1, 'backup3',
    combine_with_prior => [ 'backup1', 'backup2' ]);
$node3->start();

# Let's insert one more row.


@@ -33,40 +33,40 @@ sub combine_and_test_one_backup
    my ($backup_name, $failure_pattern, @extra_options) = @_;
    my $revised_backup_path = $node->backup_dir . '/' . $backup_name;
    $node->command_ok(
        [
            'pg_combinebackup', $original_backup_path,
            '-o', $revised_backup_path,
            '--no-sync', @extra_options
        ],
        "pg_combinebackup with @extra_options");
    if (defined $failure_pattern)
    {
        $node->command_fails_like([ 'pg_verifybackup', $revised_backup_path ],
            $failure_pattern, "unable to verify backup $backup_name");
    }
    else
    {
        $node->command_ok([ 'pg_verifybackup', $revised_backup_path ],
            "verify backup $backup_name");
    }
}
combine_and_test_one_backup('nomanifest',
    qr/could not open file.*backup_manifest/,
    '--no-manifest');
combine_and_test_one_backup('csum_none', undef, '--manifest-checksums=NONE');
combine_and_test_one_backup('csum_sha224',
    undef, '--manifest-checksums=SHA224');

# Verify that SHA224 is mentioned in the SHA224 manifest lots of times.
my $sha224_manifest =
  slurp_file($node->backup_dir . '/csum_sha224/backup_manifest');
my $sha224_count = (() = $sha224_manifest =~ /SHA224/mig);
cmp_ok($sha224_count,
    '>', 100, "SHA224 is mentioned many times in SHA224 manifest");

# Verify that SHA224 is mentioned in the SHA224 manifest lots of times.
my $nocsum_manifest =
  slurp_file($node->backup_dir . '/csum_none/backup_manifest');
my $nocsum_count = (() = $nocsum_manifest =~ /Checksum-Algorithm/mig);
is($nocsum_count, 0,
    "Checksum-Algorithm is not mentioned in no-checksum manifest");


@@ -25,7 +25,7 @@ $node1->start;
# cause anything to fail.
my $strangely_named_config_file = $node1->data_dir . '/INCREMENTAL.config';
open(my $icfg, '>', $strangely_named_config_file)
  || die "$strangely_named_config_file: $!";
close($icfg);

# Set up another new database instance. force_initdb is used because
@@ -44,15 +44,19 @@ $node1->command_ok(
# Now take an incremental backup.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
    [
        'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
        '--incremental', $backup1path . '/backup_manifest'
    ],
    "incremental backup from node1");

# Now take another incremental backup.
my $backup3path = $node1->backup_dir . '/backup3';
$node1->command_ok(
    [
        'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
        '--incremental', $backup2path . '/backup_manifest'
    ],
    "another incremental backup from node1");

# Take a full backup from node2.
@@ -64,8 +68,10 @@ $node2->command_ok(
# Take an incremental backup from node2.
my $backupother2path = $node1->backup_dir . '/backupother2';
$node2->command_ok(
    [
        'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast',
        '--incremental', $backupother1path . '/backup_manifest'
    ],
    "incremental backup from node2");

# Result directory.
@@ -85,7 +91,10 @@ $node1->command_fails_like(
# Can't combine full backup with an incremental backup from a different system.
$node1->command_fails_like(
    [
        'pg_combinebackup', $backup1path, $backupother2path, '-o',
        $resultpath
    ],
    qr/expected system identifier.*but found/,
    "can't combine backups from different nodes");
@@ -95,7 +104,10 @@ rename("$backup2path/backup_manifest", "$backup2path/backup_manifest.orig")
copy("$backupother2path/backup_manifest", "$backup2path/backup_manifest")
  or die "could not copy $backupother2path/backup_manifest";
$node1->command_fails_like(
    [
        'pg_combinebackup', $backup1path, $backup2path, $backup3path,
        '-o', $resultpath
    ],
    qr/ manifest system identifier is .*, but control file has /,
    "can't combine backups with different manifest system identifier ");

# Restore the backup state
@@ -110,20 +122,29 @@ $node1->command_fails_like(
# Can't combine backups in the wrong order.
$node1->command_fails_like(
    [
        'pg_combinebackup', $backup1path, $backup3path, $backup2path,
        '-o', $resultpath
    ],
    qr/starts at LSN.*but expected/,
    "can't combine backups in the wrong order");

# Can combine 3 backups that match up properly.
$node1->command_ok(
    [
        'pg_combinebackup', $backup1path, $backup2path, $backup3path,
        '-o', $resultpath
    ],
    "can combine 3 matching backups");
rmtree($resultpath);

# Can combine full backup with first incremental.
my $synthetic12path = $node1->backup_dir . '/synthetic12';
$node1->command_ok(
    [
        'pg_combinebackup', $backup1path, $backup2path, '-o',
        $synthetic12path
    ],
    "can combine 2 matching backups");

# Can combine result of previous step with second incremental.


@@ -36,23 +36,29 @@ EOM
# Take an incremental backup.
my $backup2path = $primary->backup_dir . '/backup2';
$primary->command_ok(
    [
        'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
        '--incremental', $backup1path . '/backup_manifest'
    ],
    "incremental backup");

# Recover the incremental backup.
my $restore = PostgreSQL::Test::Cluster->new('restore');
$restore->init_from_backup($primary, 'backup2',
    combine_with_prior => ['backup1']);
$restore->start();

# Query the DB.
my $stdout;
my $stderr;
$restore->psql(
    'lakh', 'SELECT * FROM t1',
    stdout => \$stdout,
    stderr => \$stderr);
is($stdout, '', 'SELECT * FROM t1: no stdout');
like(
    $stderr,
    qr/relation "t1" does not exist/,
    'SELECT * FROM t1: stderr missing table');

done_testing();


@@ -3855,9 +3855,7 @@ my %tests = (
            \QCREATE INDEX measurement_city_id_logdate_idx ON ONLY dump_test.measurement USING\E
            /xm,
        like => {
            %full_runs, %dump_test_schema_runs, section_post_data => 1,
        },
        unlike => {
            exclude_dump_test_schema => 1,
@@ -4783,10 +4781,8 @@ $node->command_fails_like(
##############################################################
# Test dumping pg_catalog (for research -- cannot be reloaded)

$node->command_ok([ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ],
    'pg_dump: option -n pg_catalog');

#########################################
# Test valid database exclusion patterns
@@ -4953,8 +4949,8 @@ foreach my $run (sort keys %pgdump_runs)
        }

        # Check for useless entries in "unlike" list. Runs that are
        # not listed in "like" don't need to be excluded in "unlike".
        if ($tests{$test}->{unlike}->{$test_key}
            && !defined($tests{$test}->{like}->{$test_key}))
        {
            die "useless \"unlike\" entry \"$test_key\" in test \"$test\"";
        }


@@ -56,8 +56,8 @@ sub run_test
      "in standby4";
    # Skip testing .DS_Store files on macOS to avoid risk of side effects
    append_to_file
      "$test_standby_datadir/tst_standby_dir/.DS_Store", "macOS system file"
      unless ($Config{osname} eq 'darwin');

    mkdir "$test_primary_datadir/tst_primary_dir";
    append_to_file "$test_primary_datadir/tst_primary_dir/primary_file1",


@@ -51,7 +51,7 @@ typedef struct
    int threshold_version;
    /* A function pointer for determining if the check applies */
    DataTypesUsageVersionCheck version_hook;
} DataTypesUsageChecks;

/*
 * Special values for threshold_version for indicating that a check applies to
@@ -109,17 +109,17 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for system-defined composite types in user tables"),
        .report_filename = "tables_using_composite.txt",
        .base_query =
        "SELECT t.oid FROM pg_catalog.pg_type t "
        "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
        " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
        .report_text =
        gettext_noop("Your installation contains system-defined composite types in user tables.\n"
                     "These type OIDs are not stable across PostgreSQL versions,\n"
                     "so this cluster cannot currently be upgraded. You can drop the\n"
                     "problem columns and restart the upgrade.\n"),
        .threshold_version = ALL_VERSIONS
    },

    /*
@@ -130,16 +130,16 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for incompatible \"line\" data type"),
        .report_filename = "tables_using_line.txt",
        .base_query =
        "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
                     "This data type changed its internal and input/output format\n"
                     "between your old and new versions so this\n"
                     "cluster cannot currently be upgraded. You can\n"
                     "drop the problem columns and restart the upgrade.\n"),
        .threshold_version = 903
    },

    /*
@@ -152,37 +152,37 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for reg* data types in user tables"),
        .report_filename = "tables_using_reg.txt",

        /*
         * Note: older servers will not have all of these reg* types, so we
         * have to write the query like this rather than depending on casts to
         * regtype.
         */
        .base_query =
        "SELECT oid FROM pg_catalog.pg_type t "
        "WHERE t.typnamespace = "
        " (SELECT oid FROM pg_catalog.pg_namespace "
        " WHERE nspname = 'pg_catalog') "
        " AND t.typname IN ( "
    /* pg_class.oid is preserved, so 'regclass' is OK */
        " 'regcollation', "
        " 'regconfig', "
        " 'regdictionary', "
        " 'regnamespace', "
        " 'regoper', "
        " 'regoperator', "
        " 'regproc', "
        " 'regprocedure' "
    /* pg_authid.oid is preserved, so 'regrole' is OK */
    /* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
        " )",
        .report_text =
        gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
                     "These data types reference system OIDs that are not preserved by\n"
                     "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
                     "drop the problem columns and restart the upgrade.\n"),
        .threshold_version = ALL_VERSIONS
    },

    /*
@@ -191,15 +191,15 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
        .report_filename = "tables_using_aclitem.txt",
        .base_query =
        "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
                     "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
                     "so this cluster cannot currently be upgraded. You can drop the\n"
                     "problem columns and restart the upgrade.\n"),
        .threshold_version = 1500
    },

    /*
@@ -215,15 +215,15 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for invalid \"unknown\" user columns"),
        .report_filename = "tables_using_unknown.txt",
        .base_query =
        "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
                     "This data type is no longer allowed in tables, so this cluster\n"
                     "cannot currently be upgraded. You can drop the problem columns\n"
                     "and restart the upgrade.\n"),
        .threshold_version = 906
    },

    /*
@@ -237,15 +237,15 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
        .report_filename = "tables_using_sql_identifier.txt",
        .base_query =
        "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
                     "The on-disk format for this data type has changed, so this\n"
                     "cluster cannot currently be upgraded. You can drop the problem\n"
                     "columns and restart the upgrade.\n"),
        .threshold_version = 1100
    },

    /*
@@ -253,16 +253,16 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
        .report_filename = "tables_using_jsonb.txt",
        .base_query =
        "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
                     "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
                     "cluster cannot currently be upgraded. You can drop the problem \n"
                     "columns and restart the upgrade.\n"),
        .threshold_version = MANUAL_CHECK,
        .version_hook = jsonb_9_4_check_applicable
    },

    /*
@@ -270,42 +270,42 @@ static DataTypesUsageChecks data_types_usage_checks[] =
     */
    {
        .status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
        .report_filename = "tables_using_abstime.txt",
        .base_query =
        "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
                     "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
                     "so this cluster cannot currently be upgraded. You can drop the\n"
                     "problem columns, or change them to another data type, and restart\n"
                     "the upgrade.\n"),
        .threshold_version = 1100
    },
    {
        .status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
        .report_filename = "tables_using_reltime.txt",
        .base_query =
        "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
                     "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
                     "so this cluster cannot currently be upgraded. You can drop the\n"
                     "problem columns, or change them to another data type, and restart\n"
                     "the upgrade.\n"),
        .threshold_version = 1100
    },
    {
        .status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
        .report_filename = "tables_using_tinterval.txt",
        .base_query =
        "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
                     "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
                     "so this cluster cannot currently be upgraded. You can drop the\n"
                     "problem columns, or change them to another data type, and restart\n"
                     "the upgrade.\n"),
        .threshold_version = 1100
    },

    /* End of checks marker, must remain last */
@@ -334,7 +334,7 @@ static DataTypesUsageChecks data_types_usage_checks[] =
 * there's no storage involved in a view.
 */
static void
check_for_data_types_usage(ClusterInfo *cluster, DataTypesUsageChecks *checks)
{
    bool found = false;
    bool *results;


@@ -31,7 +31,8 @@ $newpub->init(allows_streaming => 'logical');
# completely till it is open. The probability of seeing this behavior is
# higher in this test because we use wal_level as logical via
# allows_streaming => 'logical' which in turn set shared_buffers as 1MB.
$newpub->append_conf(
    'postgresql.conf', q{
bgwriter_lru_maxpages = 0
checkpoint_timeout = 1h
});
@@ -81,7 +82,7 @@ command_checks_all(
    [qr//],
    'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
);
ok(-d $newpub->data_dir . "/pg_upgrade_output.d",
    "pg_upgrade_output.d/ not removed after pg_upgrade failure");

# Set 'max_replication_slots' to match the number of slots (2) present on the


@@ -291,8 +291,7 @@ regress_sub5|f|f),
# Subscription relations should be preserved
$result = $new_sub->safe_psql('postgres',
    "SELECT srrelid, srsubstate FROM pg_subscription_rel ORDER BY srrelid");
is( $result, qq($tab_upgraded1_oid|r
$tab_upgraded2_oid|i),
    "there should be 2 rows in pg_subscription_rel(representing tab_upgraded1 and tab_upgraded2)"


@@ -72,7 +72,8 @@ my @scenario = (
    {
        'name' => 'system_identifier',
        'mutilate' => \&mutilate_system_identifier,
        'fails_like' =>
          qr/manifest system identifier is .*, but control file has/
    },
    {
        'name' => 'bad_manifest',
@@ -254,8 +255,9 @@ sub mutilate_system_identifier
    $node->init(force_initdb => 1, allows_streaming => 1);
    $node->start;
    $node->backup('backup2');
    move($node->backup_dir . '/backup2/backup_manifest',
        $backup_path . '/backup_manifest')
      or BAIL_OUT "could not copy manifest to $backup_path";
    $node->teardown_node(fail_ok => 1);
    return;
}


@@ -12,7 +12,8 @@ use Test::More;
my $tempdir = PostgreSQL::Test::Utils::tempdir;

test_bad_manifest(
    'input string ended unexpectedly',
    qr/could not parse backup manifest: The input string ended unexpectedly/,
    <<EOM);
{


@@ -12,21 +12,47 @@ program_version_ok('pg_waldump');
program_options_handling_ok('pg_waldump');

# wrong number of arguments
command_fails_like([ 'pg_waldump', ], qr/error: no arguments/,
    'no arguments');
command_fails_like(
    [ 'pg_waldump', 'foo', 'bar', 'baz' ],
    qr/error: too many command-line arguments/,
    'too many arguments');

# invalid option arguments
command_fails_like(
    [ 'pg_waldump', '--block', 'bad' ],
    qr/error: invalid block number/,
    'invalid block number');
command_fails_like(
    [ 'pg_waldump', '--fork', 'bad' ],
    qr/error: invalid fork name/,
    'invalid fork name');
command_fails_like(
    [ 'pg_waldump', '--limit', 'bad' ],
    qr/error: invalid value/,
    'invalid limit');
command_fails_like(
    [ 'pg_waldump', '--relation', 'bad' ],
    qr/error: invalid relation/,
    'invalid relation specification');
command_fails_like(
    [ 'pg_waldump', '--rmgr', 'bad' ],
    qr/error: resource manager .* does not exist/,
    'invalid rmgr name');
command_fails_like(
    [ 'pg_waldump', '--start', 'bad' ],
    qr/error: invalid WAL location/,
    'invalid start LSN');
command_fails_like(
    [ 'pg_waldump', '--end', 'bad' ],
    qr/error: invalid WAL location/,
    'invalid end LSN');

# rmgr list: If you add one to the list, consider also adding a test
# case exercising the new rmgr below.
command_like(
    [ 'pg_waldump', '--rmgr=list' ], qr/^XLOG
Transaction
Storage
CLOG
@@ -53,7 +79,8 @@ LogicalMessage$/,
my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
$node->append_conf(
    'postgresql.conf', q{
autovacuum = off
checkpoint_timeout = 1h
@@ -66,9 +93,13 @@ wal_level=logical
});
$node->start;

my ($start_lsn, $start_walfile) = split /\|/,
  $node->safe_psql('postgres',
    q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())}
  );

$node->safe_psql(
    'postgres', q{
-- heap, btree, hash, sequence
CREATE TABLE t1 (a int GENERATED ALWAYS AS IDENTITY, b text);
CREATE INDEX i1a ON t1 USING btree (a);
@@ -125,32 +156,75 @@ DROP DATABASE d1;
my $tblspc_path = PostgreSQL::Test::Utils::tempdir_short();

$node->safe_psql(
    'postgres', qq{
CREATE TABLESPACE ts1 LOCATION '$tblspc_path';
DROP TABLESPACE ts1;
});

my ($end_lsn, $end_walfile) = split /\|/,
  $node->safe_psql('postgres',
    q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())}
  );

my $default_ts_oid = $node->safe_psql('postgres',
    q{SELECT oid FROM pg_tablespace WHERE spcname = 'pg_default'});
my $postgres_db_oid = $node->safe_psql('postgres',
    q{SELECT oid FROM pg_database WHERE datname = 'postgres'});
my $rel_t1_oid = $node->safe_psql('postgres',
    q{SELECT oid FROM pg_class WHERE relname = 't1'});
my $rel_i1a_oid = $node->safe_psql('postgres',
    q{SELECT oid FROM pg_class WHERE relname = 'i1a'});

$node->stop;

# various ways of specifying WAL range
command_fails_like(
    [ 'pg_waldump', 'foo', 'bar' ],
    qr/error: could not locate WAL file "foo"/,
    'start file not found');
command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile ],
    qr/./, 'runs with start segment specified');
command_fails_like(
    [ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, 'bar' ],
    qr/error: could not open file "bar"/,
    'end file not found');
command_like(
    [
        'pg_waldump',
        $node->data_dir . '/pg_wal/' . $start_walfile,
        $node->data_dir . '/pg_wal/' . $end_walfile
    ],
    qr/./,
    'runs with start and end segment specified');
command_fails_like(
    [ 'pg_waldump', '-p', $node->data_dir ],
    qr/error: no start WAL location given/,
    'path option requires start location');
command_like(
    [
        'pg_waldump', '-p', $node->data_dir, '--start',
        $start_lsn, '--end', $end_lsn
    ],
    qr/./,
    'runs with path option and start and end locations');
command_fails_like(
    [ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ],
    qr/error: error in WAL record at/,
    'falling off the end of the WAL results in an error');

command_like(
    [
        'pg_waldump', '--quiet',
        $node->data_dir . '/pg_wal/' . $start_walfile
    ],
    qr/^$/,
    'no output with --quiet option');
command_fails_like(
[ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ],
qr/error: error in WAL record at/,
'errors are shown with --quiet');
# Test for: Display a message that we're skipping data if `from` # Test for: Display a message that we're skipping data if `from`
@ -165,7 +239,9 @@ command_fails_like([ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start',
my (@cmd, $stdout, $stderr, $result); my (@cmd, $stdout, $stderr, $result);
@cmd = ( 'pg_waldump', '--start', $new_start, $node->data_dir . '/pg_wal/' . $start_walfile ); @cmd = (
'pg_waldump', '--start', $new_start,
$node->data_dir . '/pg_wal/' . $start_walfile);
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr; $result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "runs with start segment and start LSN specified"); ok($result, "runs with start segment and start LSN specified");
like($stderr, qr/first record is after/, 'info message printed'); like($stderr, qr/first record is after/, 'info message printed');
@ -181,7 +257,9 @@ sub test_pg_waldump
my (@cmd, $stdout, $stderr, $result, @lines); my (@cmd, $stdout, $stderr, $result, @lines);
@cmd = ('pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end', $end_lsn); @cmd = (
'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end',
$end_lsn);
push @cmd, @opts; push @cmd, @opts;
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr; $result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "pg_waldump @opts: runs ok"); ok($result, "pg_waldump @opts: runs ok");
@ -216,10 +294,15 @@ is(grep(!/^rmgr: Btree/, @lines), 0, 'only Btree lines');
@lines = test_pg_waldump('--fork', 'init'); @lines = test_pg_waldump('--fork', 'init');
is(grep(!/fork init/, @lines), 0, 'only init fork lines'); is(grep(!/fork init/, @lines), 0, 'only init fork lines');
@lines = test_pg_waldump('--relation', "$default_ts_oid/$postgres_db_oid/$rel_t1_oid"); @lines = test_pg_waldump('--relation',
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines), 0, 'only lines for selected relation'); "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines),
0, 'only lines for selected relation');
@lines = test_pg_waldump('--relation', "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid", '--block', 1); @lines =
test_pg_waldump('--relation',
"$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
'--block', 1);
is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block'); is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block');
@@ -74,15 +74,15 @@ SELECT tli, start_lsn, end_lsn from pg_available_wal_summaries()
WHERE end_lsn > '$summarized_lsn'
EOM
my @lines = split(/\n/, $details);
is(0 + @lines, 1, "got exactly one new WAL summary");
my ($tli, $start_lsn, $end_lsn) = split(/\|/, $lines[0]);
note("examining summary for TLI $tli from $start_lsn to $end_lsn");

# Reconstruct the full pathname for the WAL summary file.
my $filename = sprintf "%s/pg_wal/summaries/%08s%08s%08s%08s%08s.summary",
  $node1->data_dir, $tli,
  split(m@/@, $start_lsn),
  split(m@/@, $end_lsn);
ok(-f $filename, "WAL summary file exists");

# Run pg_walsummary on it. We expect exactly two blocks to be modified,
@@ -92,6 +92,6 @@ note($stdout);
@lines = split(/\n/, $stdout);
like($stdout, qr/FORK main: block 0$/m, "stdout shows block 0 modified");
is($stderr, '', 'stderr is empty');
is(0 + @lines, 2, "UPDATE modified 2 blocks");

done_testing();
@@ -1541,18 +1541,13 @@ $node->safe_psql('postgres', 'DROP TABLE first_client_table, xy;');
# Test --exit-on-abort
$node->safe_psql('postgres',
	'CREATE TABLE counter(i int); ' . 'INSERT INTO counter VALUES (0);');

$node->pgbench(
	'-t 10 -c 2 -j 2 --exit-on-abort',
	2,
	[],
	[ qr{division by zero}, qr{Run was aborted due to an error in thread} ],
	'test --exit-on-abort',
	{
		'001_exit_on_abort' => q{
@@ -370,7 +370,8 @@ psql_fails_like(
psql_like(
	$node,
	sprintf(
		q{with x as (
select now()-backend_start AS howlong
from pg_stat_activity
where pid = pg_backend_pid()
@@ -416,20 +417,23 @@ psql_like($node, "SELECT 'one' \\g | $pipe_cmd", qr//, "one command \\g");
my $c1 = slurp_file($g_file);
like($c1, qr/one/);

psql_like($node, "SELECT 'two' \\; SELECT 'three' \\g | $pipe_cmd",
	qr//, "two commands \\g");
my $c2 = slurp_file($g_file);
like($c2, qr/two.*three/s);

psql_like(
	$node,
	"\\set SHOW_ALL_RESULTS 0\nSELECT 'four' \\; SELECT 'five' \\g | $pipe_cmd",
	qr//,
	"two commands \\g with only last result");
my $c3 = slurp_file($g_file);
like($c3, qr/five/);
unlike($c3, qr/four/);

psql_like($node, "copy (values ('foo'),('bar')) to stdout \\g | $pipe_cmd",
	qr//, "copy output passed to \\g pipe");
my $c4 = slurp_file($g_file);
like($c4, qr/foo.*bar/s);
@@ -25,13 +25,14 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'clusterdb', '-a' ],
	'invalid database not targeted by clusterdb -a');

# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_clusterdb.pl as well.
$node->command_fails_like(
	[ 'clusterdb', '-d', 'regression_invalid' ],
	qr/FATAL: cannot connect to invalid database "regression_invalid"/,
	'clusterdb cannot target invalid database');

$node->safe_psql('postgres',
	'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
@@ -38,6 +38,6 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'dropdb', 'regression_invalid' ],
	'invalid database can be dropped');

done_testing();
@@ -44,12 +44,13 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'reindexdb', '-a' ],
	'invalid database not targeted by reindexdb -a');

# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 090_reindexdb.pl as well.
$node->command_fails_like(
	[ 'reindexdb', '-d', 'regression_invalid' ],
	qr/FATAL: cannot connect to invalid database "regression_invalid"/,
	'reindexdb cannot target invalid database');

done_testing();
@@ -168,7 +168,10 @@ $node->issues_sql_like(
	qr/^(?!.*VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar).*$/s,
	'vacuumdb --exclude-schema');
$node->issues_sql_like(
	[
		'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema',
		'"Bar"', 'postgres'
	],
	qr/^(?!.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
	| VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz).*$/sx,
	'vacuumdb multiple --exclude-schema switches');
@@ -22,12 +22,13 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'vacuumdb', '-a' ],
	'invalid database not targeted by vacuumdb -a');

# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_vacuumdb.pl as well.
$node->command_fails_like(
	[ 'vacuumdb', '-d', 'regression_invalid' ],
	qr/FATAL: cannot connect to invalid database "regression_invalid"/,
	'vacuumdb cannot target invalid database');

done_testing();
@@ -173,7 +173,7 @@ typedef struct
{
	size_t		len;
	char	   *prod;
} td_entry;

#define TD_ENTRY(PROD) { sizeof(PROD) - 1, (PROD) }
@@ -181,30 +181,30 @@ static td_entry td_parser_table[JSON_NUM_NONTERMINALS][JSON_NUM_TERMINALS] =
{
	/* JSON */
	[OFS(JSON_NT_JSON)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_SCALAR_STRING),
	[OFS(JSON_NT_JSON)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_SCALAR_NUMBER),
	[OFS(JSON_NT_JSON)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_SCALAR_TRUE),
	[OFS(JSON_NT_JSON)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_SCALAR_FALSE),
	[OFS(JSON_NT_JSON)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_SCALAR_NULL),
	[OFS(JSON_NT_JSON)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY),
	[OFS(JSON_NT_JSON)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_OBJECT),

	/* ARRAY_ELEMENTS */
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
	[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),

	/* MORE_ARRAY_ELEMENTS */
	[OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_ARRAY_ELEMENTS),
	[OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),

	/* KEY_PAIRS */
	[OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_KEY_PAIRS),
	[OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),

	/* MORE_KEY_PAIRS */
	[OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_KEY_PAIRS),
	[OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
};

/* the GOAL production. Not stored in the table, but will be the initial contents of the prediction stack */
@@ -28,7 +28,7 @@ static size_t convert_case(char *dst, size_t dstsize, const char *src, ssize_t s
pg_wchar
unicode_lowercase_simple(pg_wchar code)
{
	const pg_case_map *map = find_case_map(code);

	return map ? map->simplemap[CaseLower] : code;
}
@@ -36,7 +36,7 @@ unicode_lowercase_simple(pg_wchar code)
pg_wchar
unicode_titlecase_simple(pg_wchar code)
{
	const pg_case_map *map = find_case_map(code);

	return map ? map->simplemap[CaseTitle] : code;
}
@@ -44,7 +44,7 @@ unicode_titlecase_simple(pg_wchar code)
pg_wchar
unicode_uppercase_simple(pg_wchar code)
{
	const pg_case_map *map = find_case_map(code);

	return map ? map->simplemap[CaseUpper] : code;
}
@@ -156,7 +156,7 @@ convert_case(char *dst, size_t dstsize, const char *src, ssize_t srclen,
	{
		pg_wchar	u1 = utf8_to_unicode((unsigned char *) src + srcoff);
		int			u1len = unicode_utf8len(u1);
		const pg_case_map *casemap = find_case_map(u1);

		if (str_casekind == CaseTitle)
		{
@@ -210,7 +210,7 @@ find_case_map(pg_wchar ucs)
	Assert(lengthof(case_map) >= 0x80);
	if (ucs < 0x80)
	{
		const pg_case_map *map = &case_map[ucs];

		Assert(map->codepoint == ucs);
		return map;
@@ -75,7 +75,7 @@
#define PG_U_CHARACTER_TAB 0x09

static bool range_search(const pg_unicode_range *tbl, size_t size,
						  pg_wchar code);

/*
@@ -478,7 +478,7 @@ unicode_category_abbrev(pg_unicode_category category)
 * given table.
 */
static bool
range_search(const pg_unicode_range *tbl, size_t size, pg_wchar code)
{
	int			min = 0;
	int			mid;
@@ -47,9 +47,10 @@
  aggfinalfn => 'interval_avg', aggcombinefn => 'interval_avg_combine',
  aggserialfn => 'interval_avg_serialize',
  aggdeserialfn => 'interval_avg_deserialize',
  aggmtransfn => 'interval_avg_accum',
  aggminvtransfn => 'interval_avg_accum_inv', aggmfinalfn => 'interval_avg',
  aggtranstype => 'internal', aggtransspace => '40',
  aggmtranstype => 'internal', aggmtransspace => '40' },

# sum
{ aggfnoid => 'sum(int8)', aggtransfn => 'int8_avg_accum',
@@ -77,9 +78,10 @@
  aggfinalfn => 'interval_sum', aggcombinefn => 'interval_avg_combine',
  aggserialfn => 'interval_avg_serialize',
  aggdeserialfn => 'interval_avg_deserialize',
  aggmtransfn => 'interval_avg_accum',
  aggminvtransfn => 'interval_avg_accum_inv', aggmfinalfn => 'interval_sum',
  aggtranstype => 'internal', aggtransspace => '40',
  aggmtranstype => 'internal', aggmtransspace => '40' },
{ aggfnoid => 'sum(numeric)', aggtransfn => 'numeric_avg_accum',
  aggfinalfn => 'numeric_sum', aggcombinefn => 'numeric_avg_combine',
  aggserialfn => 'numeric_avg_serialize',
@@ -30,7 +30,8 @@
  descr => 'sorts using the Unicode Collation Algorithm with default settings',
  collname => 'unicode', collprovider => 'i', collencoding => '-1',
  colllocale => 'und' },
{ oid => '811',
  descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
  collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6',
  colllocale => 'C.UTF-8', collversion => '1' },
@@ -16,9 +16,9 @@
  descr => 'default template for new databases',
  datname => 'template1', encoding => 'ENCODING',
  datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
  datallowconn => 't', dathasloginevt => 'f', datconnlimit => '-1',
  datfrozenxid => '0', datminmxid => '1', dattablespace => 'pg_default',
  datcollate => 'LC_COLLATE', datctype => 'LC_CTYPE', datlocale => 'DATLOCALE',
  daticurules => 'ICU_RULES', datacl => '_null_' },

]
@@ -3383,12 +3383,12 @@
  prosrc => 'drandom_normal' },
{ oid => '9719', descr => 'random integer in range',
  proname => 'random', provolatile => 'v', proparallel => 'r',
  prorettype => 'int4', proargtypes => 'int4 int4', proargnames => '{min,max}',
  prosrc => 'int4random' },
{ oid => '9720', descr => 'random bigint in range',
  proname => 'random', provolatile => 'v', proparallel => 'r',
  prorettype => 'int8', proargtypes => 'int8 int8', proargnames => '{min,max}',
  prosrc => 'int8random' },
{ oid => '9721', descr => 'random numeric in range',
  proname => 'random', provolatile => 'v', proparallel => 'r',
  prorettype => 'numeric', proargtypes => 'numeric numeric',
@@ -4932,9 +4932,8 @@
  prosrc => 'numeric_poly_stddev_samp' },
{ oid => '1843', descr => 'aggregate transition function',
  proname => 'interval_avg_accum', proisstrict => 'f', prorettype => 'internal',
  proargtypes => 'internal interval', prosrc => 'interval_avg_accum' },
{ oid => '3325', descr => 'aggregate combine function',
  proname => 'interval_avg_combine', proisstrict => 'f',
  prorettype => 'internal', proargtypes => 'internal internal',
@@ -5743,13 +5742,15 @@
  prosrc => 'pg_stat_get_checkpointer_restartpoints_timed' },
{ oid => '8744',
  descr => 'statistics: number of backend requested restartpoints started by the checkpointer',
  proname => 'pg_stat_get_checkpointer_restartpoints_requested',
  provolatile => 's', proparallel => 'r', prorettype => 'int8',
  proargtypes => '',
  prosrc => 'pg_stat_get_checkpointer_restartpoints_requested' },
{ oid => '8745',
  descr => 'statistics: number of backend performed restartpoints',
  proname => 'pg_stat_get_checkpointer_restartpoints_performed',
  provolatile => 's', proparallel => 'r', prorettype => 'int8',
  proargtypes => '',
  prosrc => 'pg_stat_get_checkpointer_restartpoints_performed' },
{ oid => '2771',
  descr => 'statistics: number of buffers written during checkpoints and restartpoints',
@@ -7466,8 +7467,9 @@
  proname => 'pg_column_compression', provolatile => 's', prorettype => 'text',
  proargtypes => 'any', prosrc => 'pg_column_compression' },
{ oid => '8393', descr => 'chunk ID of on-disk TOASTed value',
  proname => 'pg_column_toast_chunk_id', provolatile => 's',
  prorettype => 'oid', proargtypes => 'any',
  prosrc => 'pg_column_toast_chunk_id' },
{ oid => '2322',
  descr => 'total disk space usage for the specified tablespace',
  proname => 'pg_tablespace_size', provolatile => 'v', prorettype => 'int8',
@@ -8837,8 +8839,8 @@
  proname => 'text', prorettype => 'text', proargtypes => 'xml',
  prosrc => 'xmltotext' },
{ oid => '3813', descr => 'generate XML text node',
  proname => 'xmltext', prorettype => 'xml', proargtypes => 'text',
  prosrc => 'xmltext' },
{ oid => '2923', descr => 'map table contents to XML',
  proname => 'table_to_xml', procost => '100', provolatile => 's',
@@ -10054,8 +10056,8 @@
  prorettype => 'anyelement', proargtypes => 'anyelement jsonb',
  prosrc => 'jsonb_populate_record' },
{ oid => '9558', descr => 'test get record fields from a jsonb object',
  proname => 'jsonb_populate_record_valid', proisstrict => 'f',
  provolatile => 's', prorettype => 'bool', proargtypes => 'anyelement jsonb',
  prosrc => 'jsonb_populate_record_valid' },
{ oid => '3475',
  descr => 'get set of records with fields from a jsonb array of objects',
@@ -11233,9 +11235,10 @@
  proname => 'pg_logical_emit_message', provolatile => 'v', proparallel => 'u',
  prorettype => 'pg_lsn', proargtypes => 'bool text bytea bool',
  prosrc => 'pg_logical_emit_message_bytea' },
{ oid => '9929',
  descr => 'sync replication slots from the primary to the standby',
  proname => 'pg_sync_replication_slots', provolatile => 'v',
  proparallel => 'u', prorettype => 'void', proargtypes => '',
  prosrc => 'pg_sync_replication_slots' },

# event triggers
@@ -11447,7 +11450,8 @@
  proname => 'binary_upgrade_logical_slot_has_caught_up', provolatile => 'v',
  proparallel => 'u', prorettype => 'bool', proargtypes => 'name',
  prosrc => 'binary_upgrade_logical_slot_has_caught_up' },
{ oid => '8404',
  descr => 'for use by pg_upgrade (relation for pg_subscription_rel)',
  proname => 'binary_upgrade_add_sub_rel_state', proisstrict => 'f',
  provolatile => 'v', proparallel => 'u', prorettype => 'void',
  proargtypes => 'text oid char pg_lsn',
@@ -11455,8 +11459,7 @@
{ oid => '8405', descr => 'for use by pg_upgrade (remote_lsn for origin)',
  proname => 'binary_upgrade_replorigin_advance', proisstrict => 'f',
  provolatile => 'v', proparallel => 'u', prorettype => 'void',
  proargtypes => 'text pg_lsn', prosrc => 'binary_upgrade_replorigin_advance' },

# conversion functions
{ oid => '4302',
@@ -12161,38 +12164,30 @@
  proname => 'any_value_transfn', prorettype => 'anyelement',
  proargtypes => 'anyelement anyelement', prosrc => 'any_value_transfn' },
{ oid => '8436', descr => 'list of available WAL summary files',
  proname => 'pg_available_wal_summaries', prorows => '100', proretset => 't',
  provolatile => 'v', prorettype => 'record', proargtypes => '',
  proallargtypes => '{int8,pg_lsn,pg_lsn}', proargmodes => '{o,o,o}',
  proargnames => '{tli,start_lsn,end_lsn}',
  prosrc => 'pg_available_wal_summaries' },
{ oid => '8437', descr => 'contents of a WAL summary file',
  proname => 'pg_wal_summary_contents', prorows => '100', proretset => 't',
  provolatile => 'v', prorettype => 'record',
  proargtypes => 'int8 pg_lsn pg_lsn',
  proallargtypes => '{int8,pg_lsn,pg_lsn,oid,oid,oid,int2,int8,bool}',
  proargmodes => '{i,i,i,o,o,o,o,o,o}',
  proargnames => '{tli,start_lsn,end_lsn,relfilenode,reltablespace,reldatabase,relforknumber,relblocknumber,is_limit_block}',
  prosrc => 'pg_wal_summary_contents' },
{ oid => '8438', descr => 'WAL summarizer state',
  proname => 'pg_get_wal_summarizer_state', provolatile => 'v',
  prorettype => 'record', proargtypes => '',
  proallargtypes => '{int8,pg_lsn,pg_lsn,int4}', proargmodes => '{o,o,o,o}',
  proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}',
  prosrc => 'pg_get_wal_summarizer_state' },

# GiST stratnum implementations
{ oid => '8047', descr => 'GiST support',
  proname => 'gist_stratnum_identity', prorettype => 'int2',
  proargtypes => 'int2', prosrc => 'gist_stratnum_identity' },
]
@@ -206,7 +206,8 @@
  typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G',
  typinput => 'poly_in', typoutput => 'poly_out', typreceive => 'poly_recv',
  typsend => 'poly_send', typalign => 'd', typstorage => 'x' },
{ oid => '628', array_type_oid => '629',
  descr => 'geometric line, formats \'{A,B,C}\'/\'[point1,point2]\'',
  typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G',
  typsubscript => 'raw_array_subscript_handler', typelem => 'float8',
  typinput => 'line_in', typoutput => 'line_out', typreceive => 'line_recv',
@@ -633,9 +634,8 @@
  typoutput => 'tsm_handler_out', typreceive => '-', typsend => '-',
  typalign => 'i' },
{ oid => '269',
  descr => 'pseudo-type for the result of a table AM handler function',
  typname => 'table_am_handler', typlen => '4', typbyval => 't', typtype => 'p',
  typcategory => 'P', typinput => 'table_am_handler_in',
  typoutput => 'table_am_handler_out', typreceive => '-', typsend => '-',
  typalign => 'i' },
@@ -687,7 +687,8 @@
  typoutput => 'brin_bloom_summary_out',
  typreceive => 'brin_bloom_summary_recv', typsend => 'brin_bloom_summary_send',
  typalign => 'i', typstorage => 'x', typcollation => 'default' },
{ oid => '4601',
  descr => 'pseudo-type representing BRIN minmax-multi summary',
  typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f',
  typcategory => 'Z', typinput => 'brin_minmax_multi_summary_in',
  typoutput => 'brin_minmax_multi_summary_out',
@@ -26,13 +26,13 @@ typedef enum
	CaseTitle = 1,
	CaseUpper = 2,
	NCaseKind
} CaseKind;

typedef struct
{
	pg_wchar	codepoint;		/* Unicode codepoint */
	pg_wchar	simplemap[NCaseKind];
} pg_case_map;

/*
 * Case mapping table. Dense for codepoints < 0x80 (enabling fast lookup),
@@ -23,19 +23,19 @@ typedef struct
	uint32		first;			/* Unicode codepoint */
	uint32		last;			/* Unicode codepoint */
	uint8		category;		/* General Category */
} pg_category_range;

typedef struct
{
	uint32		first;			/* Unicode codepoint */
	uint32		last;			/* Unicode codepoint */
} pg_unicode_range;

typedef struct
{
	uint8		category;
	uint8		properties;
} pg_unicode_properties;

/*
 * The properties currently used, in no particular order. Fits in a uint8, but
@@ -86,7 +86,8 @@ if (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\blibpq_encryption\b/)
# Only run the GSSAPI tests when compiled with GSSAPI support and
# PG_TEST_EXTRA includes 'kerberos'
my $gss_supported = $ENV{with_gssapi} eq 'yes';
my $kerberos_enabled =
  $ENV{PG_TEST_EXTRA} && $ENV{PG_TEST_EXTRA} =~ /\bkerberos\b/;
my $ssl_supported = $ENV{with_ssl} eq 'openssl';

###
@@ -127,7 +128,8 @@ if ($gss_supported != 0 && $kerberos_enabled != 0)
	my $realm = 'EXAMPLE.COM';
	$krb = PostgreSQL::Test::Kerberos->new($host, $hostaddr, $realm);
	$node->append_conf('postgresql.conf',
		"krb_server_keyfile = '$krb->{keytab}'\n");
}

if ($ssl_supported != 0)
@@ -159,7 +161,8 @@ chomp($unixdir);
# Helper function that returns the encryption method in use in the
# connection.
$node->safe_psql(
	'postgres', q{
CREATE FUNCTION current_enc() RETURNS text LANGUAGE plpgsql AS $$
DECLARE
ssl_in_use bool;
@@ -206,7 +209,8 @@ $node->reload;
# Ok, all prepared. Run the tests.

my @all_test_users =
  ('testuser', 'ssluser', 'nossluser', 'gssuser', 'nogssuser');
my @all_gssencmodes = ('disable', 'prefer', 'require');
my @all_sslmodes = ('disable', 'allow', 'prefer', 'require');
my @all_sslnegotiations = ('postgres', 'direct', 'requiredirect');
@@ -220,7 +224,8 @@ my $server_config = {
### Run tests with GSS and SSL disabled in the server
###
my $test_table;
if ($ssl_supported)
{
	$test_table = q{
# USER GSSENCMODE SSLMODE SSLNEGOTIATION EVENTS -> OUTCOME
testuser disable disable * connect, authok -> plain
@@ -240,7 +245,9 @@ testuser disable disable * connect, authok
. . . direct connect, directsslreject, reconnect, sslreject -> fail
. . . requiredirect connect, directsslreject -> fail
	};
}
else
{
	# Compiled without SSL support
	$test_table = q{
# USER GSSENCMODE SSLMODE SSLNEGOTIATION EVENTS -> OUTCOME
@@ -268,8 +275,8 @@ testuser require * * - -> fail
note("Running tests with SSL and GSS disabled in the server");
test_matrix($node, $server_config,
	['testuser'], \@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
	parse_table($test_table));

###
@@ -317,10 +324,11 @@ nossluser . disable * connect, authok
$server_config->{server_ssl} = 1;
note("Running tests with SSL enabled in server");
test_matrix(
	$node, $server_config,
	[ 'testuser', 'ssluser', 'nossluser' ], ['disable'],
	\@all_sslmodes, \@all_sslnegotiations,
	parse_table($test_table));

# Disable SSL again
$node->adjust_conf('postgresql.conf', 'ssl', 'off');
@@ -399,17 +407,20 @@ nogssuser disable disable * connect, authok
	# even connecting to the server. Skip those, because we tested
	# them earlier already.
	my ($sslmodes, $sslnegotiations);
	if ($ssl_supported != 0)
	{
		($sslmodes, $sslnegotiations) =
		  (\@all_sslmodes, \@all_sslnegotiations);
	}
	else
	{
		($sslmodes, $sslnegotiations) = (['disable'], ['postgres']);
	}

	note("Running tests with GSS enabled in server");
	test_matrix($node, $server_config, [ 'testuser', 'gssuser', 'nogssuser' ],
		\@all_gssencmodes, $sslmodes, $sslnegotiations,
		parse_table($test_table));
}

###
@@ -422,7 +433,10 @@ SKIP:
	skip "kerberos not enabled in PG_TEST_EXTRA" if $kerberos_enabled == 0;

	# Sanity check that GSSAPI is still enabled from previous test.
	connect_test(
		$node,
		'user=testuser gssencmode=prefer sslmode=prefer',
		'connect, gssaccept, authok -> gss');

	# Enable SSL
	$node->adjust_conf('postgresql.conf', 'ssl', 'on');
@@ -528,10 +542,14 @@ nossluser disable disable * connect, authok
	};

	note("Running tests with both GSS and SSL enabled in server");
	test_matrix(
		$node,
		$server_config,
		[ 'testuser', 'gssuser', 'ssluser', 'nogssuser', 'nossluser' ],
		\@all_gssencmodes,
		\@all_sslmodes,
		\@all_sslnegotiations,
		parse_table($test_table));
}

###
@@ -543,8 +561,13 @@ SKIP:
	# libpq doesn't attempt SSL or GSSAPI over Unix domain
	# sockets. The server would reject them too.
	connect_test(
		$node,
		"user=localuser gssencmode=prefer sslmode=prefer host=$unixdir",
		'connect, authok -> plain');
	connect_test($node,
		"user=localuser gssencmode=require sslmode=prefer host=$unixdir",
		'- -> fail');
}

done_testing();
@@ -558,7 +581,8 @@ sub test_matrix
	local $Test::Builder::Level = $Test::Builder::Level + 1;

	my ($pg_node, $node_conf,
		$test_users, $gssencmodes, $sslmodes, $sslnegotiations, %expected)
	  = @_;

	foreach my $test_user (@{$test_users})
	{
@@ -572,10 +596,15 @@ sub test_matrix
			{
				$key = "$test_user $gssencmode $client_mode $negotiation";
				$expected_events = $expected{$key};
				if (!defined($expected_events))
				{
					$expected_events =
					  "<line missing from expected output table>";
				}
				connect_test(
					$pg_node,
					"user=$test_user gssencmode=$gssencmode sslmode=$client_mode sslnegotiation=$negotiation",
					$expected_events);
			}
		}
	}
@@ -594,7 +623,8 @@ sub connect_test
	my $connstr_full = "";
	$connstr_full .= "dbname=postgres " unless $connstr =~ m/dbname=/;
	$connstr_full .= "host=$host hostaddr=$hostaddr "
	  unless $connstr =~ m/host=/;
	$connstr_full .= $connstr;

	# Get the current size of the logfile before running the test.
@@ -614,7 +644,7 @@ sub connect_test
	my ($ret, $stdout, $stderr) = $node->psql(
		'postgres',
		'',
		extra_params => [ '-w', '-c', 'SELECT current_enc()' ],
		connstr => "$connstr_full",
		on_error_stop => 0);
@@ -628,7 +658,8 @@ sub connect_test
	# Check that the events and outcome match the expected events and
	# outcome
	my $events_and_outcome = join(', ', @events) . " -> $outcome";
	is($events_and_outcome, $expected_events_and_outcome, $test_name)
	  or diag("$stderr");
}

# Parse a test table. See comment at top of the file for the format.
@@ -640,7 +671,8 @@ sub parse_table
	my %expected;
	my ($user, $gssencmode, $sslmode, $sslnegotiation);
	foreach my $line (@lines)
	{
		# Trim comments
		$line =~ s/#.*$//;
@@ -652,7 +684,8 @@ sub parse_table
		# Ignore empty lines (includes comment-only lines)
		next if $line eq '';

		$line =~ m/^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S.*)\s*->\s*(\S+)\s*$/
		  or die "could not parse line \"$line\"";
		$user = $1 unless $1 eq ".";
		$gssencmode = $2 unless $2 eq ".";
		$sslmode = $3 unless $3 eq ".";
@@ -662,10 +695,12 @@ sub parse_table
		my @events = split /,\s*/, $5;
		my $outcome = $6;
		my $events_str = join(', ', @events);
		$events_str =~ s/\s+$//;    # trim whitespace
		my $events_and_outcome = "$events_str -> $outcome";

		my %expanded =
		  expand_expected_line($user, $gssencmode, $sslmode, $sslnegotiation,
			$events_and_outcome);
		%expected = (%expected, %expanded);
	}
	return %expected;
@@ -677,23 +712,48 @@ sub expand_expected_line
	my ($user, $gssencmode, $sslmode, $sslnegotiation, $expected) = @_;

	my %result;
	if ($user eq '*')
	{
		foreach my $x (@all_test_users)
		{
			%result = (
				%result,
				expand_expected_line(
					$x, $gssencmode, $sslmode, $sslnegotiation, $expected));
		}
	}
	elsif ($gssencmode eq '*')
	{
		foreach my $x (@all_gssencmodes)
		{
			%result = (
				%result,
				expand_expected_line(
					$user, $x, $sslmode, $sslnegotiation, $expected));
		}
	}
	elsif ($sslmode eq '*')
	{
		foreach my $x (@all_sslmodes)
		{
			%result = (
				%result,
				expand_expected_line(
					$user, $gssencmode, $x, $sslnegotiation, $expected));
		}
	}
	elsif ($sslnegotiation eq '*')
	{
		foreach my $x (@all_sslnegotiations)
		{
			%result = (
				%result,
				expand_expected_line(
					$user, $gssencmode, $sslmode, $x, $expected));
		}
	}
	else
	{
		$result{"$user $gssencmode $sslmode $sslnegotiation"} = $expected;
	}
	return %result;
@@ -708,13 +768,18 @@ sub parse_log_events
	my @events = ();
	my @lines = split /\n/, $log_contents;
	foreach my $line (@lines)
	{
		push @events, "reconnect"
		  if $line =~ /connection received/ && scalar(@events) > 0;
		push @events, "connect"
		  if $line =~ /connection received/ && scalar(@events) == 0;
		push @events, "sslaccept" if $line =~ /SSLRequest accepted/;
		push @events, "sslreject" if $line =~ /SSLRequest rejected/;
		push @events, "directsslaccept"
		  if $line =~ /direct SSL connection accepted/;
		push @events, "directsslreject"
		  if $line =~ /direct SSL connection rejected/;
		push @events, "gssaccept" if $line =~ /GSSENCRequest accepted/;
		push @events, "gssreject" if $line =~ /GSSENCRequest rejected/;
		push @events, "authfail" if $line =~ /no pg_hba.conf entry/;
@@ -722,8 +787,9 @@ sub parse_log_events
	}

	# No events at all is represented by "-"
	if (scalar @events == 0)
	{
		push @events, "-";
	}

	return @events;
@@ -92,7 +92,7 @@ pg_popcount_masked_avx512(const char *buf, int bytes, bits8 mask)
	const char *final;
	int			tail_idx;
	__mmask64	bmask = ~UINT64CONST(0);
	const __m512i maskv = _mm512_set1_epi8(mask);

	/*
	 * Align buffer down to avoid double load overhead from unaligned access.
@ -180,7 +180,8 @@ my %pgdump_runs = (
	# (undumped) extension tables
	privileged_internals => {
		dump_cmd => [
			'pg_dump', '--no-sync',
			"--file=$tempdir/privileged_internals.sql",
			# these two tables are irrelevant to the test case
			'--exclude-table=regress_pg_dump_schema.external_tab',
			'--exclude-table=regress_pg_dump_schema.extdependtab',
@ -222,15 +223,18 @@ my %pgdump_runs = (
	},
	exclude_extension => {
		dump_cmd => [
			'pg_dump', '--no-sync',
			"--file=$tempdir/exclude_extension.sql",
			'--exclude-extension=test_pg_dump', 'postgres',
		],
	},
	exclude_extension_filter => {
		dump_cmd => [
			'pg_dump',
			'--no-sync',
			"--file=$tempdir/exclude_extension_filter.sql",
			"--filter=$tempdir/exclude_extension_filter.txt",
			'postgres',
		],
	},
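
The same line-length-driven splitting shows up in the %pgdump_runs entries above: once a dump_cmd list no longer fits on one line, perltidy distributes its elements over several lines. A self-contained sketch with a made-up run name and paths (not one of the real runs):

use strict;
use warnings;

my $tempdir = "/tmp/example";
my %runs = (
	example_run => {
		dump_cmd => [
			'pg_dump', '--no-sync',
			"--file=$tempdir/example_run.sql",
			'postgres',
		],
	},
);
print scalar(@{ $runs{example_run}{dump_cmd} }) . "\n";    # prints 4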

View File

@ -112,7 +112,7 @@ static rt_node_class_test_elem rt_node_class_tests[] =
 * Return the number of keys in the radix tree.
 */
static uint64
rt_num_entries(rt_radix_tree *tree)
{
	return tree->ctl->num_keys;
}
@ -209,7 +209,7 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc)
	 * false.
	 */
	for (int i = 0; i < children; i++)
		EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));

	rt_stats(radixtree);
@ -231,14 +231,14 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc)
		TestValueType update = keys[i] + 1;

		/* rt_set should report the key found */
		EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) &update));
	}

	/* delete and re-insert keys */
	for (int i = 0; i < children; i++)
	{
		EXPECT_TRUE(rt_delete(radixtree, keys[i]));
		EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
	}

	/* look up keys after deleting and re-inserting */

View File

@ -838,20 +838,20 @@ sub init_from_backup
	my $data_path = $self->data_dir;
	if (defined $params{combine_with_prior})
	{
		my @prior_backups = @{ $params{combine_with_prior} };
		my @prior_backup_path;

		for my $prior_backup_name (@prior_backups)
		{
			push @prior_backup_path,
			  $root_node->backup_dir . '/' . $prior_backup_name;
		}

		local %ENV = $self->_get_env();
		my @combineargs = ('pg_combinebackup', '-d');
		if (exists $params{tablespace_map})
		{
			while (my ($olddir, $newdir) = each %{ $params{tablespace_map} })
			{
				push @combineargs, "-T$olddir=$newdir";
			}
@ -872,24 +872,25 @@ sub init_from_backup
			# We need to generate a tablespace_map file.
			open(my $tsmap, ">", "$data_path/tablespace_map")
			  || die "$data_path/tablespace_map: $!";

			# Extract tarfiles and add tablespace_map entries
			my @tstars = grep { /^\d+.tar/ }
			  PostgreSQL::Test::Utils::slurp_dir($backup_path);
			for my $tstar (@tstars)
			{
				my $tsoid = $tstar;
				$tsoid =~ s/\.tar$//;
				die "no tablespace mapping for $tstar"
				  if !exists $params{tablespace_map}
				  || !exists $params{tablespace_map}{$tsoid};
				my $newdir = $params{tablespace_map}{$tsoid};
				mkdir($newdir) || die "mkdir $newdir: $!";
				PostgreSQL::Test::Utils::system_or_bail($params{tar_program},
					'xf', $backup_path . '/' . $tstar,
					'-C', $newdir);

				my $escaped_newdir = $newdir;
				$escaped_newdir =~ s/\\/\\\\/g;
@ -906,11 +907,13 @@ sub init_from_backup
	# Copy the main backup. If we see a tablespace directory for which we
	# have a tablespace mapping, skip it, but remember that we saw it.
	PostgreSQL::Test::RecursiveCopy::copypath(
		$backup_path,
		$data_path,
		'filterfn' => sub {
			my ($path) = @_;
			if ($path =~ /^pg_tblspc\/(\d+)$/
				&& exists $params{tablespace_map}{$1})
			{
				push @tsoids, $1;
				return 0;
@ -922,14 +925,14 @@ sub init_from_backup
	{
		# We need to generate a tablespace_map file.
		open(my $tsmap, ">", "$data_path/tablespace_map")
		  || die "$data_path/tablespace_map: $!";

		# Now use the list of tablespace links to copy each tablespace.
		for my $tsoid (@tsoids)
		{
			die "no tablespace mapping for $tsoid"
			  if !exists $params{tablespace_map}
			  || !exists $params{tablespace_map}{$tsoid};

			my $olddir = $backup_path . '/pg_tblspc/' . $tsoid;
			my $newdir = $params{tablespace_map}{$tsoid};
@ -1166,9 +1169,8 @@ sub restart
	# -w is now the default but having it here does no harm and helps
	# compatibility with older versions.
	$ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D',
		$self->data_dir, '-l', $self->logfile, 'restart');

	if ($ret != 0)
	{
@ -3370,19 +3372,21 @@ sub validate_slot_inactive_since
	my ($self, $slot_name, $reference_time) = @_;
	my $name = $self->name;

	my $inactive_since = $self->safe_psql(
		'postgres',
		qq(SELECT inactive_since FROM pg_replication_slots
			WHERE slot_name = '$slot_name' AND inactive_since IS NOT NULL;)
	);

	# Check that the inactive_since is sane
	is( $self->safe_psql(
			'postgres',
			qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
				'$inactive_since'::timestamptz > '$reference_time'::timestamptz;]
		),
		't',
		"last inactive time for slot $slot_name is valid on node $name")
	  or die "could not validate captured inactive_since for slot $slot_name";

	return $inactive_since;
}
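
One small convention visible in the Cluster.pm hunks above is the padding perltidy adds inside dereferencing braces, e.g. @{ $params{combine_with_prior} }. A minimal runnable sketch of that style (hash and values invented):

use strict;
use warnings;

my %params = (combine_with_prior => [ 'backup1', 'backup2' ]);

# Padded-brace dereference, matching the style in init_from_backup above.
my @prior_backups = @{ $params{combine_with_prior} };
print join(",", @prior_backups), "\n";    # prints backup1,backup2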

View File

@ -10,10 +10,12 @@ use strict;
use warnings FATAL => 'all';
use PostgreSQL::Test::Utils;

our (
	$krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit,
	$klist, $kdb5_util, $kadmin_local, $krb5kdc,
	$krb5_conf, $kdc_conf, $krb5_cache, $krb5_log,
	$kdc_log, $kdc_port, $kdc_datadir, $kdc_pidfile,
	$keytab);

INIT
{
@ -178,7 +180,8 @@ $realm = {
key_stash_file = $kdc_datadir/_k5.$realm
}!);

mkdir $kdc_datadir
  or BAIL_OUT("could not create directory \"$kdc_datadir\"");

# Ensure that we use test's config and cache files, not global ones.
$ENV{'KRB5_CONFIG'} = $krb5_conf;
@ -189,7 +192,8 @@ $realm = {
system_or_bail $kdb5_util, 'create', '-s', '-P', 'secret0';

system_or_bail $kadmin_local, '-q',
  "addprinc -randkey $service_principal";
system_or_bail $kadmin_local, '-q', "ktadd -k $keytab $service_principal";

system_or_bail $krb5kdc, '-P', $kdc_pidfile;
@ -226,7 +230,8 @@ END
	# take care not to change the script's exit value
	my $exit_code = $?;

	kill 'INT', `cat $kdc_pidfile`
	  if defined($kdc_pidfile) && -f $kdc_pidfile;

	$? = $exit_code;
}
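
The our(...) declaration above is wrapped into aligned columns once the variable list exceeds the line limit. A stand-alone sketch of that layout with placeholder names (not the real kerberos variables):

use strict;
use warnings;

our (
	$bin_dir, $sbin_dir, $config, $kinit,
	$klist, $kdb_util, $kadmin, $kdc);
($bin_dir, $kdc) = ('/usr/bin', 'krb5kdc');
print "$bin_dir $kdc\n";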

View File

@ -99,9 +99,11 @@ is($result, qq(33|0|t), 'check streamed sequence content on standby 2');
$node_primary->safe_psql('postgres',
	"CREATE UNLOGGED SEQUENCE ulseq; SELECT nextval('ulseq')");
$node_primary->wait_for_replay_catchup($node_standby_1);
is( $node_standby_1->safe_psql(
		'postgres',
		"SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
	't',
	'pg_sequence_last_value() on unlogged sequence on standby 1');

# Check that only READ-only queries can run on standbys
is($node_standby_1->psql('postgres', 'INSERT INTO tab_int VALUES (1)'),

View File

@ -56,7 +56,8 @@ $bravo->safe_psql('postgres', 'checkpoint');
# beyond the previous vacuum.
$alpha->safe_psql('postgres', 'create table test2 (a int, b bytea)');
$alpha->safe_psql('postgres',
	q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)}
);
$alpha->safe_psql('postgres', 'truncate test2');

# Wait again for all records to be replayed.

View File

@ -443,7 +443,7 @@ $primary4->safe_psql(
# Get inactive_since value after the slot's creation. Note that the slot is
# still inactive till it's used by the standby below.
my $inactive_since =
  $primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);

$standby4->start;
@ -502,7 +502,7 @@ $publisher4->safe_psql('postgres',
# Get inactive_since value after the slot's creation. Note that the slot is
# still inactive till it's used by the subscriber below.
$inactive_since =
  $publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);

$subscriber4->start;
$subscriber4->safe_psql('postgres',

View File

@ -178,13 +178,15 @@ sub check_slots_conflict_reason
	$res = $node_standby->safe_psql(
		'postgres', qq(
			 select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;)
	);

	is($res, "$reason", "$active_slot reason for conflict is $reason");

	$res = $node_standby->safe_psql(
		'postgres', qq(
			 select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;)
	);

	is($res, "$reason", "$inactive_slot reason for conflict is $reason");
}
@ -559,7 +561,8 @@ check_slots_conflict_reason('vacuum_full_', 'rows_removed');
##################################################
# Get the restart_lsn from an invalidated slot
my $restart_lsn = $node_standby->safe_psql(
	'postgres',
	"SELECT restart_lsn FROM pg_replication_slots
		WHERE slot_name = 'vacuum_full_activeslot' AND conflicting;"
);

View File

@ -42,11 +42,15 @@ like(
	qr/FATAL:\s+cannot connect to invalid database "regression_invalid"/,
	"can't connect to invalid database - error message");

is( $node->psql(
		'postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
	2,
	"can't ALTER invalid database");

# check invalid database can't be used as a template
is( $node->psql(
		'postgres',
		'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
	3,
	"can't use invalid database as template");

View File

@ -170,7 +170,8 @@ $standby1->start;
# Capture the inactive_since of the slot from the primary. Note that the slot
# will be inactive since the corresponding subscription was dropped.
my $inactive_since_on_primary =
  $primary->validate_slot_inactive_since('lsub1_slot',
	$slot_creation_time_on_primary);

# Wait for the standby to catch up so that the standby is not lagging behind
# the failover slots.
@ -190,7 +191,8 @@ is( $standby1->safe_psql(
# Capture the inactive_since of the synced slot on the standby
my $inactive_since_on_standby =
  $standby1->validate_slot_inactive_since('lsub1_slot',
	$slot_creation_time_on_primary);

# Synced slot on the standby must get its own inactive_since
is( $standby1->safe_psql(
@ -264,7 +266,8 @@ $primary->safe_psql(
# Capture the inactive_since of the slot from the primary. Note that the slot
# will be inactive since the corresponding subscription was dropped.
$inactive_since_on_primary =
  $primary->validate_slot_inactive_since('lsub1_slot',
	$slot_creation_time_on_primary);

# Wait for the standby to catch up so that the standby is not lagging behind
# the failover slots.
@ -276,8 +279,8 @@ my $log_offset = -s $standby1->logfile;
$standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");

# Confirm that the invalidated slot has been dropped.
$standby1->wait_for_log(
	qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/, $log_offset);

# Confirm that the logical slot has been re-created on the standby and is
# flagged as 'synced'
@ -336,7 +339,8 @@ ok( $stderr =~
	"cannot sync slots if dbname is not specified in primary_conninfo");

# Add the dbname back to the primary_conninfo for further tests
$standby1->append_conf('postgresql.conf',
	"primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->reload;

##################################################
@ -427,19 +431,20 @@ $primary->wait_for_replay_catchup($standby1);
# synced slot. See the test where we promote standby (Promote the standby1 to
# primary.)
$primary->safe_psql('postgres',
	"SELECT pg_logical_emit_message(false, 'test', 'test');");

# Get the confirmed_flush_lsn for the logical slot snap_test_slot on the primary
my $confirmed_flush_lsn = $primary->safe_psql('postgres',
	"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';"
);

$standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");

# Verify that confirmed_flush_lsn of snap_test_slot slot is synced to the standby
ok( $standby1->poll_query_until(
		'postgres',
		"SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"
	),
	'confirmed_flush_lsn of slot snap_test_slot synced to standby');

##################################################
@ -479,22 +484,24 @@ GRANT USAGE on SCHEMA myschema TO repl_role;
});

# Start the standby with changed primary_conninfo.
$standby1->append_conf('postgresql.conf',
	"primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
$standby1->start;

# Run the synchronization function. If the sync flow was not prepared
# to handle such attacks, it would have failed during the validation
# of the primary_slot_name itself resulting in
# ERROR: slot synchronization requires valid primary_slot_name
$standby1->safe_psql('slotsync_test_db',
	"SELECT pg_sync_replication_slots();");

# Reset the dbname and user in primary_conninfo to the earlier values.
$standby1->append_conf('postgresql.conf',
	"primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->reload;

# Drop the newly created database.
$primary->psql('postgres', q{DROP DATABASE slotsync_test_db;});

##################################################
# Test to confirm that the slot sync worker exits on invalid GUC(s) and
@ -508,20 +515,21 @@ $standby1->append_conf('postgresql.conf', qq(sync_replication_slots = on));
$standby1->reload;

# Confirm that the slot sync worker is able to start.
$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);

$log_offset = -s $standby1->logfile;

# Disable another GUC required for slot sync.
$standby1->append_conf('postgresql.conf', qq(hot_standby_feedback = off));
$standby1->reload;

# Confirm that slot sync worker acknowledge the GUC change and logs the msg
# about wrong configuration.
$standby1->wait_for_log(
	qr/slot sync worker will restart because of a parameter change/,
	$log_offset);
$standby1->wait_for_log(
	qr/slot synchronization requires hot_standby_feedback to be enabled/,
	$log_offset);

$log_offset = -s $standby1->logfile;
@ -531,8 +539,7 @@ $standby1->append_conf('postgresql.conf', "hot_standby_feedback = on");
$standby1->reload;

# Confirm that the slot sync worker is able to start now.
$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);

##################################################
# Test to confirm that confirmed_flush_lsn of the logical slot on the primary
@ -557,7 +564,8 @@ $subscriber1->wait_for_subscription_sync;
# Do not allow any further advancement of the confirmed_flush_lsn for the
# lsub1_slot.
$subscriber1->safe_psql('postgres',
	"ALTER SUBSCRIPTION regress_mysub1 DISABLE");

# Wait for the replication slot to become inactive on the publisher
$primary->poll_query_until(
@ -567,12 +575,14 @@ $primary->poll_query_until(
# Get the confirmed_flush_lsn for the logical slot lsub1_slot on the primary
my $primary_flush_lsn = $primary->safe_psql('postgres',
	"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';"
);

# Confirm that confirmed_flush_lsn of lsub1_slot slot is synced to the standby
ok( $standby1->poll_query_until(
		'postgres',
		"SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"
	),
	'confirmed_flush_lsn of slot lsub1_slot synced to standby');

##################################################
@ -636,7 +646,8 @@ $subscriber2->safe_psql(
$subscriber2->wait_for_subscription_sync;

$subscriber1->safe_psql('postgres',
	"ALTER SUBSCRIPTION regress_mysub1 ENABLE");

my $offset = -s $primary->logfile;
@ -674,7 +685,8 @@ $primary->wait_for_log(
# primary and keeps waiting for the standby specified in standby_slot_names
# (sb1_slot aka standby1).
$result =
  $subscriber1->safe_psql('postgres',
	"SELECT count(*) <> $primary_row_count FROM tab_int;");
is($result, 't',
	"subscriber1 doesn't get data from primary until standby1 acknowledges changes"
);
@ -714,7 +726,8 @@ $standby1->stop;
# Disable the regress_mysub1 to prevent the logical walsender from generating
# more warnings.
$subscriber1->safe_psql('postgres',
	"ALTER SUBSCRIPTION regress_mysub1 DISABLE");

# Wait for the replication slot to become inactive on the publisher
$primary->poll_query_until(
@ -758,8 +771,7 @@ $primary->reload;
$back_q->quit;

$primary->safe_psql('postgres',
	"SELECT pg_drop_replication_slot('test_slot');");

# Add the physical slot (sb1_slot) back to the standby_slot_names for further
# tests.
@ -767,7 +779,8 @@ $primary->adjust_conf('postgresql.conf', 'standby_slot_names', "'sb1_slot'");
$primary->reload;

# Enable the regress_mysub1 for further tests
$subscriber1->safe_psql('postgres',
	"ALTER SUBSCRIPTION regress_mysub1 ENABLE");

##################################################
# Test that logical replication will wait for the user-created inactive
@ -835,14 +848,16 @@ $standby1->promote;
# promotion. We do this check before the slot is enabled on the new primary
# below, otherwise, the slot gets active setting inactive_since to NULL.
my $inactive_since_on_new_primary =
  $standby1->validate_slot_inactive_since('lsub1_slot',
	$promotion_time_on_primary);

is( $standby1->safe_psql(
		'postgres',
		"SELECT '$inactive_since_on_new_primary'::timestamptz > '$inactive_since_on_primary'::timestamptz"
	),
	"t",
	'synchronized slot has got its own inactive_since on the new primary after promotion'
);

# Update subscription with the new primary's connection info
my $standby1_conninfo = $standby1->connstr . ' dbname=postgres';
@ -850,8 +865,10 @@ $subscriber1->safe_psql('postgres',
	"ALTER SUBSCRIPTION regress_mysub1 CONNECTION '$standby1_conninfo';");

# Confirm the synced slot 'lsub1_slot' is retained on the new primary
is( $standby1->safe_psql(
		'postgres',
		q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}
	),
	't',
	'synced slot retained on the new primary');
@ -861,9 +878,8 @@ $standby1->safe_psql('postgres',
$standby1->wait_for_catchup('regress_mysub1');

# Confirm that data in tab_int replicated on the subscriber
is($subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
	"20", 'data replicated from the new primary');

# Consume the data from the snap_test_slot. The synced slot should reach a
# consistent point by restoring the snapshot at the restart_lsn serialized

View File

@ -86,7 +86,8 @@ switch_server_cert(
	restart => 'no');
$result = $node->restart(fail_ok => 1);
is($result, 0,
	'restart fails with password-protected key file with wrong password');

switch_server_cert(
	$node,

View File

@ -48,7 +48,8 @@ is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# Update the rows on the publisher and check the additional columns on
# subscriber didn't change
$node_publisher->safe_psql('postgres',
	"UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");

$node_publisher->wait_for_catchup('tap_sub');

View File

@ -32,7 +32,8 @@ $node_publisher->safe_psql('postgres',
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
	"CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)"
);

# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';

View File

@ -288,8 +288,7 @@ is( $node_subscriber->safe_psql(
# Since disabling subscription doesn't wait for walsender to release the replication
# slot and exit, wait for the slot to become inactive.
$node_publisher->poll_query_until($db,
	qq(SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '$sub2_name' AND active_pid IS NULL))
) or die "slot never became inactive";

View File

@ -166,7 +166,8 @@ BEGIN;
INSERT INTO tbl SELECT i, sha256(i::text::bytea) FROM generate_series(1, 10000) s(i);
COMMIT;
]);
test_skip_lsn($node_publisher, $node_subscriber,
	"(4, sha256(4::text::bytea))",
	"4", "test skipping stream-commit");

$result = $node_subscriber->safe_psql('postgres',

View File

@ -490,7 +490,8 @@ $node_publisher->safe_psql('postgres',
$node_subscriber->safe_psql('postgres',
	"CREATE TABLE test_replica_id_full (x int, y text)");
$node_subscriber->safe_psql('postgres',
	"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)"
);

# insert some initial data
$node_publisher->safe_psql('postgres',

View File

@ -207,10 +207,7 @@ GRANT regress_alice TO regress_admin WITH INHERIT FALSE, SET TRUE;
# the above grant doesn't help.
publish_insert("alice.unpartitioned", 14);
expect_failure(
	"alice.unpartitioned", 3, 7, 13,
	qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
	"with no privileges cannot replicate");

View File

@ -469,23 +469,22 @@ $node_subscriber->safe_psql(
));
$node_subscriber->wait_for_subscription_sync($node_publisher, 'sub1');

$result =
  $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
is( $result, qq(1|f
2|t), 'check snapshot on subscriber');

# Update all rows in the table and ensure the rows with the missing `b`
# attribute replicate correctly.
$node_publisher->safe_psql('postgres', "UPDATE tab_default SET a = a + 1");
$node_publisher->wait_for_catchup('sub1');

# When the bug is present, the `1|f` row will not be updated to `2|f` because
# the publisher incorrectly fills in `NULL` for `b` and publishes an update
# for `1|NULL`, which doesn't exist in the subscriber.
$result =
  $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
is( $result, qq(2|f
3|t), 'check replicated update on subscriber');

$node_publisher->stop('fast');
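
When an assignment's right-hand side no longer fits on the line, perltidy moves the whole call to a continuation line, as in the $result assignments above. A minimal sketch with a stand-in query function (names and data invented):

use strict;
use warnings;

sub fetch_rows { return "2|f\n3|t"; }

my $result =
  fetch_rows("SELECT a, b FROM tab_default");
print "$result\n";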

View File

@ -231,7 +231,6 @@ BUF_MEM
BYTE
BY_HANDLE_FILE_INFORMATION
Backend
BackendParameters
BackendStartupData
BackendState
@ -245,7 +244,6 @@ Barrier
BaseBackupCmd
BaseBackupTargetHandle
BaseBackupTargetType
BeginDirectModify_function
BeginForeignInsert_function
BeginForeignModify_function
@ -275,10 +273,19 @@ BlockId
BlockIdData
BlockInfoRecord
BlockNumber
BlockRefTable
BlockRefTableBuffer
BlockRefTableChunk
BlockRefTableEntry
BlockRefTableKey
BlockRefTableReader
BlockRefTableSerializedEntry
BlockRefTableWriter
BlockSampler
BlockSamplerData
BlockedProcData
BlockedProcsData
BlocktableEntry
BloomBuildState
BloomFilter
BloomMetaPageData
@ -367,6 +374,7 @@ CallStmt
CancelRequestPacket
Cardinality
CaseExpr
CaseKind
CaseTestExpr
CaseWhen
Cash
@ -483,8 +491,8 @@ CopyFromState
CopyFromStateData
CopyHeaderChoice
CopyInsertMethod
CopyLogVerbosityChoice
CopyMethod
CopyMultiInsertBuffer
CopyMultiInsertInfo
CopyOnErrorChoice
@ -560,10 +568,14 @@ DR_intorel
DR_printtup
DR_sqlfunction
DR_transientrel
DSMRegistryCtxStruct
DSMRegistryEntry
DWORD
DataDirSyncMethod
DataDumperPtr
DataPageDeleteStack
DataTypesUsageChecks
DataTypesUsageVersionCheck
DatabaseInfo
DateADT
DateTimeErrorExtra
@ -621,8 +633,6 @@ DropSubscriptionStmt
DropTableSpaceStmt
DropUserMappingStmt
DropdbStmt
DumpComponents
DumpId
DumpOptions
@ -758,6 +768,7 @@ FetchStmt
FieldSelect
FieldStore
File
FileBackupMethod
FileFdwExecutionState
FileFdwPlanState
FileNameMap
@ -1165,9 +1176,11 @@ ImportForeignSchemaType
ImportForeignSchema_function
ImportQual
InProgressEnt
InProgressIO
IncludeWal
InclusionOpaque
IncrementVarSublevelsUp_context
IncrementalBackupInfo
IncrementalSort
IncrementalSortExecutionStatus
IncrementalSortGroupInfo
@ -1223,7 +1236,6 @@ InjectionPointConditionType
InjectionPointEntry
InjectionPointSharedState
InlineCodeBlock
InsertStmt
Instrumentation
Int128AggState
@ -1249,6 +1261,7 @@ IsForeignScanParallelSafe_function
IsoConnInfo
IspellDict
Item
ItemArray
ItemId
ItemIdData
ItemPointer
@ -1272,7 +1285,6 @@ Join
JoinCostWorkspace
JoinDomain
JoinExpr
JoinHashEntry
JoinPath
JoinPathExtraData
@ -1299,6 +1311,7 @@ JsonExprOp
JsonExprState
JsonFormat
JsonFormatType
JsonFuncExpr
JsonHashEntry
JsonIncrementalState
JsonIsPredicate
@ -1315,15 +1328,16 @@ JsonManifestWALRangeField
JsonObjectAgg
JsonObjectConstructor
JsonOutput
JsonParseContext
JsonParseErrorType
JsonParseExpr
JsonParserStack
JsonPath
JsonPathBool
JsonPathCountVarsCallback
JsonPathExecContext
JsonPathExecResult
JsonPathGetVarCallback
JsonPathGinAddPathItemFunc
JsonPathGinContext
JsonPathGinExtractNodesFunc
@ -1334,7 +1348,6 @@ JsonPathGinPathItem
JsonPathItem
JsonPathItemType
JsonPathKeyword
JsonPathParseItem
JsonPathParseResult
JsonPathPredicateCallback
@ -1398,6 +1411,7 @@ LINE
LLVMAttributeRef
LLVMBasicBlockRef
LLVMBuilderRef
LLVMContextRef
LLVMErrorRef
LLVMIntPredicate
LLVMJITEventListenerRef
@ -1976,7 +1990,6 @@ ParallelHashJoinBatch
ParallelHashJoinBatchAccessor
ParallelHashJoinState
ParallelIndexScanDesc
ParallelSlot
ParallelSlotArray
ParallelSlotResultHandler
@ -2053,6 +2066,7 @@ PathClauseUsage
PathCostComparison
PathHashStack
PathKey
PathKeyInfo
PathKeysComparison
PathTarget
PatternInfo
@ -2175,7 +2189,6 @@ PortalStrategy
PostParseColumnRefHook
PostgresPollingStatusType
PostingItem
PreParseColumnRefHook
PredClass
PredIterInfo
@ -2199,6 +2212,7 @@ PrivTarget
PrivateRefCountEntry
ProcArrayStruct
ProcLangInfo
ProcNumber
ProcSignalBarrierType
ProcSignalHeader
ProcSignalReason
@ -2217,8 +2231,8 @@ ProjectionPath
PromptInterruptContext
ProtocolVersion
PrsStorage
PruneFreezeResult
PruneReason
PruneState
PruneStepResult
PsqlScanCallbacks
@ -2320,6 +2334,7 @@ ReadFunc
ReadLocalXLogPageNoWaitPrivate
ReadReplicationSlotCmd
ReadStream
ReadStreamBlockNumberCB
ReassignOwnedStmt
RecheckForeignScan_function
RecordCacheArrayEntry
@ -2433,6 +2448,7 @@ ResourceOwnerDesc
ResourceReleaseCallback
ResourceReleaseCallbackItem
ResourceReleasePhase
ResourceReleasePriority
RestoreOptions
RestorePass
RestrictInfo
@ -2696,8 +2712,8 @@ SpecialJoinInfo
SpinDelayStatus
SplitInterval
SplitLR
SplitPageLayout
SplitPartitionContext
SplitPoint
SplitTextOutputData
SplitVar
@ -2753,6 +2769,7 @@ SubscriptingRefState
Subscription
SubscriptionInfo
SubscriptionRelState
SummarizerReadLocalXLogPrivate
SupportRequestCost
SupportRequestIndexCondition
SupportRequestOptimizeWindowClause
@ -2761,15 +2778,16 @@ SupportRequestSelectivity
SupportRequestSimplify
SupportRequestWFuncMonotonic
Syn
SyncOps
SyncRepConfigData
SyncRepStandbyData
SyncRequestHandler
SyncRequestType
SyncingTablesState
SysFKRelationship
SysScanDesc
SyscacheCallbackFunction
SysloggerStartupData
SystemRowsSamplerData
SystemSamplerData
SystemTimeSamplerData
@ -2868,6 +2886,7 @@ TestDSMRegistryStruct
TestDecodingData
TestDecodingTxnData
TestSpec
TestValueType
TextFreq
TextPositionState
TheLexeme
@ -2882,6 +2901,9 @@ TidRangeScan
TidRangeScanState
TidScan
TidScanState
TidStore
TidStoreIter
TidStoreIterResult
TimeADT
TimeLineHistoryCmd
TimeLineHistoryEntry
@ -2904,7 +2926,6 @@ TocEntry
TokenAuxData
TokenizedAuthLine
TrackItem
TransApplyAction
TransInvalidationInfo
TransState
@ -2913,6 +2934,7 @@ TransactionState
TransactionStateData
TransactionStmt
TransactionStmtKind
TransamVariablesData
TransformInfo
TransformJsonStringValuesState
TransitionCaptureState
@ -2956,7 +2978,6 @@ TupleTableSlotOps
TuplesortClusterArg
TuplesortDatumArg
TuplesortIndexArg
TuplesortIndexBTreeArg
TuplesortIndexHashArg
TuplesortInstrumentation
@ -3009,6 +3030,7 @@ UnresolvedTup
UnresolvedTupData
UpdateContext
UpdateStmt
UploadManifestCmd
UpperRelationKind
UpperUniquePath
UserAuth
@ -3057,7 +3079,6 @@ VolatileFunctionStatus
Vsrt
WAIT_ORDER
WALAvailability
WALInsertLock
WALInsertLockPadded
WALOpenSegment
@ -3090,6 +3111,7 @@ WaitEventTimeout
WaitPMResult
WalCloseMethod
WalCompression
WalInsertClass
WalLevel
WalRcvData
WalRcvExecResult
@ -3103,6 +3125,9 @@ WalSnd
WalSndCtlData
WalSndSendDataCallback
WalSndState
WalSummarizerData
WalSummaryFile
WalSummaryIO
WalSyncMethod
WalTimeSample
WalUsage
@ -3127,6 +3152,7 @@ WindowStatePerAggData
WindowStatePerFunc
WithCheckOption
WithClause
WordBoundaryNext
WordEntry
WordEntryIN
WordEntryPos
@ -3217,12 +3243,15 @@ ZstdCompressorState
_SPI_connection
_SPI_plan
__m128i
__m512i
__mmask64
__time64_t
_dev_t
_ino_t
_locale_t
_resultmap
_stringlist
access_vector_t
acquireLocksOnSubLinks_context
add_nulling_relids_context
adjust_appendrel_attrs_context
@ -3241,6 +3270,7 @@ amgetbitmap_function
amgettuple_function
aminitparallelscan_function
aminsert_function
aminsertcleanup_function
ammarkpos_function
amoptions_function
amparallelrescan_function
@ -3255,13 +3285,17 @@ assign_collations_context
auth_password_hook_typ
autovac_table
av_relation
avc_cache
avl_dbase
avl_node
avl_tree
avw_dbase
backslashResult
backup_file_entry
backup_file_hash
backup_manifest_info
backup_manifest_option
backup_wal_range
base_yy_extra_type
basebackup_options
bbsink
@ -3295,6 +3329,8 @@ bitmapword
bits16
bits32
bits8
blockreftable_hash
blockreftable_iterator
bloom_filter
boolKEY
brin_column_state
@ -3304,6 +3340,10 @@ cached_re_str
canonicalize_state
cashKEY
catalogid_hash
cb_cleanup_dir
cb_options
cb_tablespace
cb_tablespace_mapping
check_agg_arguments_context
check_function_callback
check_network_data
@ -3370,6 +3410,7 @@ dsa_segment_header
dsa_segment_index
dsa_segment_map
dshash_compare_function
dshash_copy_function
dshash_hash
dshash_hash_function
dshash_parameters
@ -3395,6 +3436,7 @@ emit_log_hook_type
eval_const_expressions_context
exec_thread_arg
execution_state
exit_function
explain_get_index_name_hook_type
f_smgr
fasthash_state
@ -3493,6 +3535,7 @@ indexed_tlist
inet
inetKEY
inet_struct
initRowMethod
init_function
inline_cte_walker_context
inline_error_callback_arg
@ -3508,12 +3551,14 @@ int32_t
int64
int64KEY
int8
int8x16_t
internalPQconninfoOption
intptr_t
intset_internal_node
intset_leaf_node
intset_node
intvKEY
io_callback_fn
io_stat_col
itemIdCompact
itemIdCompactData
@ -3524,6 +3569,8 @@ json_aelem_action
json_manifest_error_callback
json_manifest_per_file_callback
json_manifest_per_wal_range_callback
json_manifest_system_identifier_callback
json_manifest_version_callback
json_ofield_action
json_scalar_action
json_struct_action
@ -3540,6 +3587,8 @@ list_sort_comparator
local_relopt
local_relopts
local_source
local_ts_iter
local_ts_radix_tree
locale_t
locate_agg_of_level_context
locate_var_of_level_context
@ -3558,10 +3607,12 @@ macKEY
macaddr
macaddr8
macaddr_sortsupport_state
manifest_data
manifest_file
manifest_files_hash
manifest_files_iterator
manifest_wal_range
manifest_writer
map_variable_attnos_context
max_parallel_hazard_context
mb2wchar_with_len_converter
@ -3608,14 +3659,16 @@ pairingheap_node
pam_handle_t
parallel_worker_main_type
parse_error_callback_arg
partition_method_t
pendingPosition
pending_label
pgParameterStatus
pg_atomic_flag
pg_atomic_uint32
pg_atomic_uint64
pg_be_sasl_mech
pg_case_map
pg_category_range
pg_checksum_context
pg_checksum_raw_context
pg_checksum_type
@ -3659,10 +3712,13 @@ pg_time_usec_t
pg_tz
pg_tz_cache
pg_tzenum
pg_unicode_category
pg_unicode_decompinfo
pg_unicode_decomposition
pg_unicode_norminfo
pg_unicode_normprops
pg_unicode_properties
pg_unicode_range
pg_unicode_recompinfo
pg_utf_to_local_combined
pg_uuid_t
@ -3788,24 +3844,32 @@ remove_nulling_relids_context
rendezvousHashEntry
replace_rte_variables_callback
replace_rte_variables_context
report_error_fn
ret_type
rewind_source
rewrite_event
rf_context
rfile
rm_detail_t
role_auth_extra
rolename_hash
row_security_policy_hook_type
rsv_callback
rt_iter
rt_node_class_test_elem
rt_radix_tree
saophash_hash
save_buffer
scram_state
scram_state_enum
security_class_t
sem_t
sepgsql_context_info_t
sequence_magic
set_join_pathlist_hook_type
set_rel_pathlist_hook_type
shared_ts_iter
shared_ts_radix_tree
shm_mq
shm_mq_handle
shm_mq_iovec
@ -3871,6 +3935,7 @@ substitute_actual_srf_parameters_context
substitute_phv_relids_context
symbol
tablespaceinfo
td_entry
teSection
temp_tablespaces_extra
test_re_flags
@ -3912,6 +3977,7 @@ uid_t
uint128
uint16
uint16_t
uint16x8_t
uint32
uint32_t
uint32x4_t
@ -3951,6 +4017,7 @@ walrcv_endstreaming_fn
walrcv_exec_fn
walrcv_get_backend_pid_fn
walrcv_get_conninfo_fn
walrcv_get_dbname_from_conninfo_fn
walrcv_get_senderinfo_fn
walrcv_identify_system_fn
walrcv_readtimelinehistoryfile_fn
@ -3962,10 +4029,11 @@ wchar2mb_with_len_converter
wchar_t
win32_deadchild_waitinfo
wint_t
worker_state
worktable
wrap
ws_file_info
ws_options
xl_brin_createidx
xl_brin_desummarize
xl_brin_insert
@ -4059,6 +4127,7 @@ xmlBuffer
xmlBufferPtr
xmlChar
xmlDocPtr
xmlError
xmlErrorPtr
xmlExternalEntityLoader
xmlGenericErrorFunc
@ -4085,35 +4154,3 @@ yyscan_t
z_stream
z_streamp
zic_t