Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

The pgindent part of this is pretty small, consisting mainly of
fixing up self-inflicted formatting damage from patches that
hadn't bothered to add their new typedefs to typedefs.list.
In order to keep it from making anything worse, I manually added
a dozen or so typedefs that appeared in the existing typedefs.list
but not in the buildfarm's list.  Perhaps we should formalize that,
or, better, find a way to get those typedefs into the automatic list.

pgperltidy is as opinionated as always, and so is reformat-dat-files.
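As a concrete illustration of the typedefs gap described above, a throwaway Perl sketch (a hypothetical helper, not part of this commit; both inputs assumed to be one typedef name per line) that prints names present in the committed typedefs.list but missing from a downloaded buildfarm list:

use strict;
use warnings;

die "usage: $0 committed.list buildfarm.list\n" if @ARGV != 2;
my ($committed, $buildfarm) = @ARGV;

# Load the buildfarm's automatic list for O(1) membership tests.
my %in_buildfarm;
open my $bf, '<', $buildfarm or die "$buildfarm: $!";
while (<$bf>) { chomp; $in_buildfarm{$_} = 1; }
close $bf;

# Report committed entries that the automatic list lacks.
open my $cm, '<', $committed or die "$committed: $!";
while (<$cm>)
{
    chomp;
    print "$_\n" if length && !$in_buildfarm{$_};
}
close $cm;

Whatever it prints would be the candidates either to formalize or to get into the automatic list somehow.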
Tom Lane 2024-05-14 16:34:50 -04:00
parent 3ddbac368c
commit da256a4a7f
60 changed files with 969 additions and 689 deletions

@@ -50,7 +50,7 @@ typedef struct
* command. Elsewhere (including the case of default) NULL.
*/
const char *createdb_dtemplate;
} sepgsql_context_info_t;
static sepgsql_context_info_t sepgsql_context_info;

@@ -67,7 +67,7 @@ typedef struct
{
SubTransactionId subid;
char *label;
} pending_label;
/*
* sepgsql_get_client_label

@@ -44,7 +44,7 @@ typedef struct
/* true, if tcontext is valid */
char *ncontext; /* temporary scontext on execution of trusted
* procedure, or NULL elsewhere */
} avc_cache;
/*
* Declaration of static variables

@@ -315,72 +315,72 @@ sub ParseData
my $catname = $1;
my $data = [];
# Scan the input file.
while (<$ifd>)
{
    my $hash_ref;

    if (/{/)
    {
        # Capture the hash ref
        # NB: Assumes that the next hash ref can't start on the
        # same line where the present one ended.
        # Not foolproof, but we shouldn't need a full parser,
        # since we expect relatively well-behaved input.

        # Quick hack to detect when we have a full hash ref to
        # parse. We can't just use a regex because of values in
        # pg_aggregate and pg_proc like '{0,0}'. This will need
        # work if we ever need to allow unbalanced braces within
        # a field value.
        my $lcnt = tr/{//;
        my $rcnt = tr/}//;

        if ($lcnt == $rcnt)
        {
            # We're treating the input line as a piece of Perl, so we
            # need to use string eval here. Tell perlcritic we know what
            # we're doing.
            eval "\$hash_ref = $_"; ## no critic (ProhibitStringyEval)

            if (!ref $hash_ref)
            {
                die "$input_file: error parsing line $.:\n$_\n";
            }

            # Annotate each hash with the source line number.
            $hash_ref->{line_number} = $.;

            # Expand tuples to their full representation.
            AddDefaultValues($hash_ref, $schema, $catname);
        }
        else
        {
            my $next_line = <$ifd>;
            die "$input_file: file ends within Perl hash\n"
              if !defined $next_line;
            $_ .= $next_line;
            redo;
        }
    }

    # If we found a hash reference, keep it, unless it is marked as
    # autogenerated; in that case it'd duplicate an entry we'll
    # autogenerate below. (This makes it safe for reformat_dat_file.pl
    # with --full-tuples to print autogenerated entries, which seems like
    # useful behavior for debugging.)
    #
    # Otherwise, we have a non-data string, which we keep only if
    # the caller requested it.
    if (defined $hash_ref)
    {
        push @$data, $hash_ref if !$hash_ref->{autogenerated};
    }
    else
    {
        push @$data, $_ if $preserve_comments;
    }
}
close $ifd;
# If this is pg_type, auto-generate array types too.
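As an aside, the brace-balancing trick shown in this hunk (count '{' versus '}' with tr///, keep appending physical lines until they match, then string-eval the buffer) works in isolation too; here is a toy sketch with invented sample data, ignoring everything else ParseData does:

use strict;
use warnings;

# One .dat-style entry split across two physical lines, with an
# embedded '{0,0}' value that would defeat a single-line regex.
my @lines = (
    "{ oid => 1, proargmodes => '{0,0}',\n",
    "  name => 'demo' },\n");

my $buf = '';
foreach my $line (@lines)
{
    $buf .= $line;

    # Keep accumulating until the braces balance.
    next if ($buf =~ tr/{//) != ($buf =~ tr/}//);

    my $hash_ref;
    eval "\$hash_ref = $buf";    ## no critic (ProhibitStringyEval)
    die "parse error: $@" if !ref $hash_ref;
    print "parsed entry with oid $hash_ref->{oid}\n";
    $buf = '';
}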

@@ -302,9 +302,7 @@ $node->safe_psql(
));
$node->command_checks_all(
[
'pg_amcheck', '-d', 'regression_invalid'
],
[ 'pg_amcheck', '-d', 'regression_invalid' ],
1,
[qr/^$/],
[
@@ -314,8 +312,7 @@ $node->command_checks_all(
$node->command_checks_all(
[
'pg_amcheck', '-d', 'postgres',
'-t', 'regression_invalid.public.foo',
'pg_amcheck', '-d', 'postgres', '-t', 'regression_invalid.public.foo',
],
1,
[qr/^$/],

@@ -411,7 +411,9 @@ SKIP:
$tblspc_tars[0] =~ m|/([0-9]*)\.tar$|;
my $tblspcoid = $1;
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
$node2->init_from_backup($node, 'tarbackup2', tar_program => $tar,
$node2->init_from_backup(
$node, 'tarbackup2',
tar_program => $tar,
'tablespace_map' => { $tblspcoid => $realRepTsDir });
$node2->start;
@@ -776,10 +778,8 @@ $node->command_ok(
'stream', '-d', "dbname=db1", '-R',
],
'pg_basebackup with dbname and -R runs');
like(
slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
qr/dbname=db1/m,
'recovery conf file sets dbname');
like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
qr/dbname=db1/m, 'recovery conf file sets dbname');
rmtree("$tempdir/backup_dbname_R");
@@ -976,8 +976,11 @@ $node2->append_conf('postgresql.conf', 'summarize_wal = on');
$node2->start;
$node2->command_fails_like(
[ @pg_basebackup_defs, '-D', "$tempdir" . '/diff_sysid',
'--incremental', "$backupdir" . '/backup_manifest' ],
[
@pg_basebackup_defs, '-D',
"$tempdir" . '/diff_sysid', '--incremental',
"$backupdir" . '/backup_manifest'
],
qr/manifest system identifier is .*, but database system identifier is/,
"pg_basebackup fails with different database system manifest");

@@ -140,11 +140,11 @@ command_fails(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_t->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_t->host,
'--subscriber-port', $node_t->port,
'--database', 'pg1',
'--database', 'pg2'
$node_p->connstr('pg1'), '--socket-directory',
$node_t->host, '--subscriber-port',
$node_t->port, '--database',
'pg1', '--database',
'pg2'
],
'target server is not in recovery');
@@ -154,11 +154,11 @@ command_fails(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_s->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_s->host,
'--subscriber-port', $node_s->port,
'--database', 'pg1',
'--database', 'pg2'
$node_p->connstr('pg1'), '--socket-directory',
$node_s->host, '--subscriber-port',
$node_s->port, '--database',
'pg1', '--database',
'pg2'
],
'standby is up and running');
@@ -188,11 +188,11 @@ command_fails(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_c->data_dir, '--publisher-server',
$node_s->connstr('pg1'),
'--socket-directory', $node_c->host,
'--subscriber-port', $node_c->port,
'--database', 'pg1',
'--database', 'pg2'
$node_s->connstr('pg1'), '--socket-directory',
$node_c->host, '--subscriber-port',
$node_c->port, '--database',
'pg1', '--database',
'pg2'
],
'primary server is in recovery');
@@ -201,7 +201,8 @@ $node_p->safe_psql('pg1', "INSERT INTO tbl1 VALUES('second row')");
$node_p->wait_for_replay_catchup($node_s);
# Check some unmet conditions on node P
$node_p->append_conf('postgresql.conf', q{
$node_p->append_conf(
'postgresql.conf', q{
wal_level = replica
max_replication_slots = 1
max_wal_senders = 1
@@ -214,16 +215,17 @@ command_fails(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_s->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_s->host,
'--subscriber-port', $node_s->port,
'--database', 'pg1',
'--database', 'pg2'
$node_p->connstr('pg1'), '--socket-directory',
$node_s->host, '--subscriber-port',
$node_s->port, '--database',
'pg1', '--database',
'pg2'
],
'primary contains unmet conditions on node P');
# Restore default settings here but only apply it after testing standby. Some
# standby settings should not be a lower setting than on the primary.
$node_p->append_conf('postgresql.conf', q{
$node_p->append_conf(
'postgresql.conf', q{
wal_level = logical
max_replication_slots = 10
max_wal_senders = 10
@@ -231,7 +233,8 @@ max_worker_processes = 8
});
# Check some unmet conditions on node S
$node_s->append_conf('postgresql.conf', q{
$node_s->append_conf(
'postgresql.conf', q{
max_replication_slots = 1
max_logical_replication_workers = 1
max_worker_processes = 2
@@ -241,14 +244,15 @@ command_fails(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_s->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_s->host,
'--subscriber-port', $node_s->port,
'--database', 'pg1',
'--database', 'pg2'
$node_p->connstr('pg1'), '--socket-directory',
$node_s->host, '--subscriber-port',
$node_s->port, '--database',
'pg1', '--database',
'pg2'
],
'standby contains unmet conditions on node S');
$node_s->append_conf('postgresql.conf', q{
$node_s->append_conf(
'postgresql.conf', q{
max_replication_slots = 10
max_logical_replication_workers = 4
max_worker_processes = 8
@@ -262,15 +266,15 @@ command_ok(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_s->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_s->host,
'--subscriber-port', $node_s->port,
'--publication', 'pub1',
'--publication', 'pub2',
'--subscription', 'sub1',
'--subscription', 'sub2',
'--database', 'pg1',
'--database', 'pg2'
$node_p->connstr('pg1'), '--socket-directory',
$node_s->host, '--subscriber-port',
$node_s->port, '--publication',
'pub1', '--publication',
'pub2', '--subscription',
'sub1', '--subscription',
'sub2', '--database',
'pg1', '--database',
'pg2'
],
'run pg_createsubscriber --dry-run on node S');
@@ -286,10 +290,10 @@ command_ok(
'pg_createsubscriber', '--verbose',
'--dry-run', '--pgdata',
$node_s->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_s->host,
'--subscriber-port', $node_s->port,
'--replication-slot', 'replslot1'
$node_p->connstr('pg1'), '--socket-directory',
$node_s->host, '--subscriber-port',
$node_s->port, '--replication-slot',
'replslot1'
],
'run pg_createsubscriber without --databases');
@@ -299,15 +303,15 @@ command_ok(
'pg_createsubscriber', '--verbose',
'--verbose', '--pgdata',
$node_s->data_dir, '--publisher-server',
$node_p->connstr('pg1'),
'--socket-directory', $node_s->host,
'--subscriber-port', $node_s->port,
'--publication', 'pub1',
'--publication', 'Pub2',
'--replication-slot', 'replslot1',
'--replication-slot', 'replslot2',
'--database', 'pg1',
'--database', 'pg2'
$node_p->connstr('pg1'), '--socket-directory',
$node_s->host, '--subscriber-port',
$node_s->port, '--publication',
'pub1', '--publication',
'Pub2', '--replication-slot',
'replslot1', '--replication-slot',
'replslot2', '--database',
'pg1', '--database',
'pg2'
],
'run pg_createsubscriber on node S');

@@ -119,7 +119,7 @@ append_to_file "$pgdata/global/pg_internal.init.123", "foo";
# Only perform this test on non-macOS systems though as creating incorrect
# system files may have side effects on macOS.
append_to_file "$pgdata/global/.DS_Store", "foo"
unless ($Config{osname} eq 'darwin');
# Enable checksums.
command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],

@@ -44,7 +44,7 @@ EOM
# Read list of tablespace OIDs. There should be just one.
my @tsoids = grep { /^\d+/ } slurp_dir($primary->data_dir . '/pg_tblspc');
is(0+@tsoids, 1, "exactly one user-defined tablespace");
is(0 + @tsoids, 1, "exactly one user-defined tablespace");
my $tsoid = $tsoids[0];
# Take a full backup.
@@ -52,8 +52,12 @@ my $backup1path = $primary->backup_dir . '/backup1';
my $tsbackup1path = $tempdir . '/ts1backup';
mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!";
$primary->command_ok(
[ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast',
"-T${tsprimary}=${tsbackup1path}" ], "full backup");
[
'pg_basebackup', '-D',
$backup1path, '--no-sync',
'-cfast', "-T${tsprimary}=${tsbackup1path}"
],
"full backup");
# Now make some database changes.
$primary->safe_psql('postgres', <<EOM);
@@ -79,9 +83,12 @@ my $backup2path = $primary->backup_dir . '/backup2';
my $tsbackup2path = $tempdir . '/tsbackup2';
mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!";
$primary->command_ok(
[ 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
"-T${tsprimary}=${tsbackup2path}",
'--incremental', $backup1path . '/backup_manifest' ],
[
'pg_basebackup', '-D',
$backup2path, '--no-sync',
'-cfast', "-T${tsprimary}=${tsbackup2path}",
'--incremental', $backup1path . '/backup_manifest'
],
"incremental backup");
# Find an LSN to which either backup can be recovered.
@@ -105,10 +112,13 @@ $primary->poll_query_until('postgres', $archive_wait_query)
# choose the same timeline.
my $tspitr1path = $tempdir . '/tspitr1';
my $pitr1 = PostgreSQL::Test::Cluster->new('pitr1');
$pitr1->init_from_backup($primary, 'backup1',
standby => 1, has_restoring => 1,
tablespace_map => { $tsoid => $tspitr1path });
$pitr1->append_conf('postgresql.conf', qq{
$pitr1->init_from_backup(
$primary, 'backup1',
standby => 1,
has_restoring => 1,
tablespace_map => { $tsoid => $tspitr1path });
$pitr1->append_conf(
'postgresql.conf', qq{
recovery_target_lsn = '$lsn'
recovery_target_action = 'promote'
archive_mode = 'off'
@@ -119,11 +129,14 @@ $pitr1->start();
# basic configuration as before.
my $tspitr2path = $tempdir . '/tspitr2';
my $pitr2 = PostgreSQL::Test::Cluster->new('pitr2');
$pitr2->init_from_backup($primary, 'backup2',
standby => 1, has_restoring => 1,
combine_with_prior => [ 'backup1' ],
tablespace_map => { $tsbackup2path => $tspitr2path });
$pitr2->append_conf('postgresql.conf', qq{
$pitr2->init_from_backup(
$primary, 'backup2',
standby => 1,
has_restoring => 1,
combine_with_prior => ['backup1'],
tablespace_map => { $tsbackup2path => $tspitr2path });
$pitr2->append_conf(
'postgresql.conf', qq{
recovery_target_lsn = '$lsn'
recovery_target_action = 'promote'
archive_mode = 'off'
@@ -131,11 +144,9 @@ archive_mode = 'off'
$pitr2->start();
# Wait until both servers exit recovery.
$pitr1->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery();")
$pitr1->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery();")
or die "Timed out while waiting apply to reach LSN $lsn";
$pitr2->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery();")
$pitr2->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery();")
or die "Timed out while waiting apply to reach LSN $lsn";
# Perform a logical dump of each server, and check that they match.
@@ -150,14 +161,20 @@ $pitr2->poll_query_until('postgres',
my $backupdir = $primary->backup_dir;
my $dump1 = $backupdir . '/pitr1.dump';
my $dump2 = $backupdir . '/pitr2.dump';
$pitr1->command_ok([
'pg_dumpall', '-f', $dump1, '--no-sync', '--no-unlogged-table-data',
'-d', $pitr1->connstr('postgres'),
$pitr1->command_ok(
[
'pg_dumpall', '-f',
$dump1, '--no-sync',
'--no-unlogged-table-data', '-d',
$pitr1->connstr('postgres'),
],
'dump from PITR 1');
$pitr1->command_ok([
'pg_dumpall', '-f', $dump2, '--no-sync', '--no-unlogged-table-data',
'-d', $pitr1->connstr('postgres'),
$pitr1->command_ok(
[
'pg_dumpall', '-f',
$dump2, '--no-sync',
'--no-unlogged-table-data', '-d',
$pitr1->connstr('postgres'),
],
'dump from PITR 2');
@@ -171,7 +188,7 @@ is($compare_res, 0, "dumps are identical");
if ($compare_res != 0)
{
my ($stdout, $stderr) =
run_command([ 'diff', '-u', $dump1, $dump2 ]);
print "=== diff of $dump1 and $dump2\n";
print "=== stdout ===\n";
print $stdout;

@@ -36,14 +36,16 @@ EOM
# Now take an incremental backup.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
[ 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
'--incremental', $backup1path . '/backup_manifest' ],
[
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
'--incremental', $backup1path . '/backup_manifest'
],
"incremental backup from node1");
# Restore the incremental backup and use it to create a new node.
my $node2 = PostgreSQL::Test::Cluster->new('node2');
$node2->init_from_backup($node1, 'backup2',
combine_with_prior => [ 'backup1' ]);
combine_with_prior => ['backup1']);
$node2->start();
# Insert rows on both nodes.
@@ -57,14 +59,16 @@ EOM
# Take another incremental backup, from node2, based on backup2 from node1.
my $backup3path = $node1->backup_dir . '/backup3';
$node2->command_ok(
[ 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
'--incremental', $backup2path . '/backup_manifest' ],
[
'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
'--incremental', $backup2path . '/backup_manifest'
],
"incremental backup from node2");
# Restore the incremental backup and use it to create a new node.
my $node3 = PostgreSQL::Test::Cluster->new('node3');
$node3->init_from_backup($node1, 'backup3',
combine_with_prior => [ 'backup1', 'backup2' ]);
$node3->start();
# Let's insert one more row.

@@ -33,40 +33,40 @@ sub combine_and_test_one_backup
my ($backup_name, $failure_pattern, @extra_options) = @_;
my $revised_backup_path = $node->backup_dir . '/' . $backup_name;
$node->command_ok(
[ 'pg_combinebackup', $original_backup_path, '-o', $revised_backup_path,
'--no-sync', @extra_options ],
[
'pg_combinebackup', $original_backup_path,
'-o', $revised_backup_path,
'--no-sync', @extra_options
],
"pg_combinebackup with @extra_options");
if (defined $failure_pattern)
{
$node->command_fails_like(
[ 'pg_verifybackup', $revised_backup_path ],
$failure_pattern,
"unable to verify backup $backup_name");
$node->command_fails_like([ 'pg_verifybackup', $revised_backup_path ],
$failure_pattern, "unable to verify backup $backup_name");
}
else
{
$node->command_ok(
[ 'pg_verifybackup', $revised_backup_path ],
$node->command_ok([ 'pg_verifybackup', $revised_backup_path ],
"verify backup $backup_name");
}
}
combine_and_test_one_backup('nomanifest',
qr/could not open file.*backup_manifest/, '--no-manifest');
combine_and_test_one_backup('csum_none',
undef, '--manifest-checksums=NONE');
qr/could not open file.*backup_manifest/,
'--no-manifest');
combine_and_test_one_backup('csum_none', undef, '--manifest-checksums=NONE');
combine_and_test_one_backup('csum_sha224',
undef, '--manifest-checksums=SHA224');
# Verify that SHA224 is mentioned in the SHA224 manifest lots of times.
my $sha224_manifest =
slurp_file($node->backup_dir . '/csum_sha224/backup_manifest');
my $sha224_count = (() = $sha224_manifest =~ /SHA224/mig);
cmp_ok($sha224_count,
'>', 100, "SHA224 is mentioned many times in SHA224 manifest");
# Verify that SHA224 is mentioned in the SHA224 manifest lots of times.
my $nocsum_manifest =
slurp_file($node->backup_dir . '/csum_none/backup_manifest');
my $nocsum_count = (() = $nocsum_manifest =~ /Checksum-Algorithm/mig);
is($nocsum_count, 0,
"Checksum-Algorithm is not mentioned in no-checksum manifest");

@@ -25,7 +25,7 @@ $node1->start;
# cause anything to fail.
my $strangely_named_config_file = $node1->data_dir . '/INCREMENTAL.config';
open(my $icfg, '>', $strangely_named_config_file)
|| die "$strangely_named_config_file: $!";
close($icfg);
# Set up another new database instance. force_initdb is used because
@@ -44,15 +44,19 @@ $node1->command_ok(
# Now take an incremental backup.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
[ 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
'--incremental', $backup1path . '/backup_manifest' ],
[
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
'--incremental', $backup1path . '/backup_manifest'
],
"incremental backup from node1");
# Now take another incremental backup.
my $backup3path = $node1->backup_dir . '/backup3';
$node1->command_ok(
[ 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
'--incremental', $backup2path . '/backup_manifest' ],
[
'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
'--incremental', $backup2path . '/backup_manifest'
],
"another incremental backup from node1");
# Take a full backup from node2.
@@ -64,8 +68,10 @@ $node2->command_ok(
# Take an incremental backup from node2.
my $backupother2path = $node1->backup_dir . '/backupother2';
$node2->command_ok(
[ 'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast',
'--incremental', $backupother1path . '/backup_manifest' ],
[
'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast',
'--incremental', $backupother1path . '/backup_manifest'
],
"incremental backup from node2");
# Result directory.
@@ -85,7 +91,10 @@ $node1->command_fails_like(
# Can't combine full backup with an incremental backup from a different system.
$node1->command_fails_like(
[ 'pg_combinebackup', $backup1path, $backupother2path, '-o', $resultpath ],
[
'pg_combinebackup', $backup1path, $backupother2path, '-o',
$resultpath
],
qr/expected system identifier.*but found/,
"can't combine backups from different nodes");
@@ -95,7 +104,10 @@ rename("$backup2path/backup_manifest", "$backup2path/backup_manifest.orig")
copy("$backupother2path/backup_manifest", "$backup2path/backup_manifest")
or die "could not copy $backupother2path/backup_manifest";
$node1->command_fails_like(
[ 'pg_combinebackup', $backup1path, $backup2path, $backup3path, '-o', $resultpath ],
[
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
'-o', $resultpath
],
qr/ manifest system identifier is .*, but control file has /,
"can't combine backups with different manifest system identifier ");
# Restore the backup state
@@ -110,20 +122,29 @@ $node1->command_fails_like(
# Can't combine backups in the wrong order.
$node1->command_fails_like(
[ 'pg_combinebackup', $backup1path, $backup3path, $backup2path, '-o', $resultpath ],
[
'pg_combinebackup', $backup1path, $backup3path, $backup2path,
'-o', $resultpath
],
qr/starts at LSN.*but expected/,
"can't combine backups in the wrong order");
# Can combine 3 backups that match up properly.
$node1->command_ok(
[ 'pg_combinebackup', $backup1path, $backup2path, $backup3path, '-o', $resultpath ],
[
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
'-o', $resultpath
],
"can combine 3 matching backups");
rmtree($resultpath);
# Can combine full backup with first incremental.
my $synthetic12path = $node1->backup_dir . '/synthetic12';
$node1->command_ok(
[ 'pg_combinebackup', $backup1path, $backup2path, '-o', $synthetic12path ],
[
'pg_combinebackup', $backup1path, $backup2path, '-o',
$synthetic12path
],
"can combine 2 matching backups");
# Can combine result of previous step with second incremental.

@@ -36,23 +36,29 @@ EOM
# Take an incremental backup.
my $backup2path = $primary->backup_dir . '/backup2';
$primary->command_ok(
[ 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
'--incremental', $backup1path . '/backup_manifest' ],
[
'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
'--incremental', $backup1path . '/backup_manifest'
],
"incremental backup");
# Recover the incremental backup.
my $restore = PostgreSQL::Test::Cluster->new('restore');
$restore->init_from_backup($primary, 'backup2',
combine_with_prior => [ 'backup1' ]);
combine_with_prior => ['backup1']);
$restore->start();
# Query the DB.
my $stdout;
my $stderr;
$restore->psql('lakh', 'SELECT * FROM t1',
stdout => \$stdout, stderr => \$stderr);
$restore->psql(
'lakh', 'SELECT * FROM t1',
stdout => \$stdout,
stderr => \$stderr);
is($stdout, '', 'SELECT * FROM t1: no stdout');
like($stderr, qr/relation "t1" does not exist/,
'SELECT * FROM t1: stderr missing table');
like(
$stderr,
qr/relation "t1" does not exist/,
'SELECT * FROM t1: stderr missing table');
done_testing();

@@ -3855,9 +3855,7 @@ my %tests = (
\QCREATE INDEX measurement_city_id_logdate_idx ON ONLY dump_test.measurement USING\E
/xm,
like => {
%full_runs,
%dump_test_schema_runs,
section_post_data => 1,
%full_runs, %dump_test_schema_runs, section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
@@ -4783,10 +4781,8 @@ $node->command_fails_like(
##############################################################
# Test dumping pg_catalog (for research -- cannot be reloaded)
$node->command_ok(
[ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ],
'pg_dump: option -n pg_catalog'
);
$node->command_ok([ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ],
'pg_dump: option -n pg_catalog');
#########################################
# Test valid database exclusion patterns
@@ -4953,8 +4949,8 @@ foreach my $run (sort keys %pgdump_runs)
}
# Check for useless entries in "unlike" list. Runs that are
# not listed in "like" don't need to be excluded in "unlike".
if ($tests{$test}->{unlike}->{$test_key} &&
!defined($tests{$test}->{like}->{$test_key}))
if ($tests{$test}->{unlike}->{$test_key}
&& !defined($tests{$test}->{like}->{$test_key}))
{
die "useless \"unlike\" entry \"$test_key\" in test \"$test\"";
}

@@ -56,8 +56,8 @@ sub run_test
"in standby4";
# Skip testing .DS_Store files on macOS to avoid risk of side effects
append_to_file
"$test_standby_datadir/tst_standby_dir/.DS_Store",
"macOS system file" unless ($Config{osname} eq 'darwin');
"$test_standby_datadir/tst_standby_dir/.DS_Store", "macOS system file"
unless ($Config{osname} eq 'darwin');
mkdir "$test_primary_datadir/tst_primary_dir";
append_to_file "$test_primary_datadir/tst_primary_dir/primary_file1",

@@ -51,7 +51,7 @@ typedef struct
int threshold_version;
/* A function pointer for determining if the check applies */
DataTypesUsageVersionCheck version_hook;
} DataTypesUsageChecks;
/*
* Special values for threshold_version for indicating that a check applies to
@@ -109,17 +109,17 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for system-defined composite types in user tables"),
.report_filename = "tables_using_composite.txt",
.base_query =
"SELECT t.oid FROM pg_catalog.pg_type t "
"LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
" WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
.report_text =
gettext_noop("Your installation contains system-defined composite types in user tables.\n"
"These type OIDs are not stable across PostgreSQL versions,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns and restart the upgrade.\n"),
.threshold_version = ALL_VERSIONS
.report_filename = "tables_using_composite.txt",
.base_query =
"SELECT t.oid FROM pg_catalog.pg_type t "
"LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
" WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
.report_text =
gettext_noop("Your installation contains system-defined composite types in user tables.\n"
"These type OIDs are not stable across PostgreSQL versions,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns and restart the upgrade.\n"),
.threshold_version = ALL_VERSIONS
},
/*
@@ -130,16 +130,16 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for incompatible \"line\" data type"),
.report_filename = "tables_using_line.txt",
.base_query =
"SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
"This data type changed its internal and input/output format\n"
"between your old and new versions so this\n"
"cluster cannot currently be upgraded. You can\n"
"drop the problem columns and restart the upgrade.\n"),
.threshold_version = 903
.report_filename = "tables_using_line.txt",
.base_query =
"SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
"This data type changed its internal and input/output format\n"
"between your old and new versions so this\n"
"cluster cannot currently be upgraded. You can\n"
"drop the problem columns and restart the upgrade.\n"),
.threshold_version = 903
},
/*
@@ -152,37 +152,37 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for reg* data types in user tables"),
.report_filename = "tables_using_reg.txt",
.report_filename = "tables_using_reg.txt",
/*
* Note: older servers will not have all of these reg* types, so we
* have to write the query like this rather than depending on casts to
* regtype.
*/
.base_query =
"SELECT oid FROM pg_catalog.pg_type t "
"WHERE t.typnamespace = "
" (SELECT oid FROM pg_catalog.pg_namespace "
" WHERE nspname = 'pg_catalog') "
" AND t.typname IN ( "
/* pg_class.oid is preserved, so 'regclass' is OK */
" 'regcollation', "
" 'regconfig', "
" 'regdictionary', "
" 'regnamespace', "
" 'regoper', "
" 'regoperator', "
" 'regproc', "
" 'regprocedure' "
" 'regcollation', "
" 'regconfig', "
" 'regdictionary', "
" 'regnamespace', "
" 'regoper', "
" 'regoperator', "
" 'regproc', "
" 'regprocedure' "
/* pg_authid.oid is preserved, so 'regrole' is OK */
/* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
" )",
.report_text =
gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
"These data types reference system OIDs that are not preserved by\n"
"pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
"drop the problem columns and restart the upgrade.\n"),
.threshold_version = ALL_VERSIONS
" )",
.report_text =
gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
"These data types reference system OIDs that are not preserved by\n"
"pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
"drop the problem columns and restart the upgrade.\n"),
.threshold_version = ALL_VERSIONS
},
/*
@@ -191,15 +191,15 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
.report_filename = "tables_using_aclitem.txt",
.base_query =
"SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
"The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns and restart the upgrade.\n"),
.threshold_version = 1500
.report_filename = "tables_using_aclitem.txt",
.base_query =
"SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
"The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns and restart the upgrade.\n"),
.threshold_version = 1500
},
/*
@@ -215,15 +215,15 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for invalid \"unknown\" user columns"),
.report_filename = "tables_using_unknown.txt",
.base_query =
"SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
"This data type is no longer allowed in tables, so this cluster\n"
"cannot currently be upgraded. You can drop the problem columns\n"
"and restart the upgrade.\n"),
.threshold_version = 906
.report_filename = "tables_using_unknown.txt",
.base_query =
"SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
"This data type is no longer allowed in tables, so this cluster\n"
"cannot currently be upgraded. You can drop the problem columns\n"
"and restart the upgrade.\n"),
.threshold_version = 906
},
/*
@@ -237,15 +237,15 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
.report_filename = "tables_using_sql_identifier.txt",
.base_query =
"SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
"The on-disk format for this data type has changed, so this\n"
"cluster cannot currently be upgraded. You can drop the problem\n"
"columns and restart the upgrade.\n"),
.threshold_version = 1100
.report_filename = "tables_using_sql_identifier.txt",
.base_query =
"SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
"The on-disk format for this data type has changed, so this\n"
"cluster cannot currently be upgraded. You can drop the problem\n"
"columns and restart the upgrade.\n"),
.threshold_version = 1100
},
/*
@@ -253,16 +253,16 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
.report_filename = "tables_using_jsonb.txt",
.base_query =
"SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
"The internal format of \"jsonb\" changed during 9.4 beta so this\n"
"cluster cannot currently be upgraded. You can drop the problem \n"
"columns and restart the upgrade.\n"),
.threshold_version = MANUAL_CHECK,
.version_hook = jsonb_9_4_check_applicable
.report_filename = "tables_using_jsonb.txt",
.base_query =
"SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
"The internal format of \"jsonb\" changed during 9.4 beta so this\n"
"cluster cannot currently be upgraded. You can drop the problem \n"
"columns and restart the upgrade.\n"),
.threshold_version = MANUAL_CHECK,
.version_hook = jsonb_9_4_check_applicable
},
/*
@@ -270,42 +270,42 @@ static DataTypesUsageChecks data_types_usage_checks[] =
*/
{
.status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
.report_filename = "tables_using_abstime.txt",
.base_query =
"SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
"The \"abstime\" type has been removed in PostgreSQL version 12,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns, or change them to another data type, and restart\n"
"the upgrade.\n"),
.threshold_version = 1100
.report_filename = "tables_using_abstime.txt",
.base_query =
"SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
"The \"abstime\" type has been removed in PostgreSQL version 12,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns, or change them to another data type, and restart\n"
"the upgrade.\n"),
.threshold_version = 1100
},
{
.status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
.report_filename = "tables_using_reltime.txt",
.base_query =
"SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
"The \"reltime\" type has been removed in PostgreSQL version 12,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns, or change them to another data type, and restart\n"
"the upgrade.\n"),
.threshold_version = 1100
.report_filename = "tables_using_reltime.txt",
.base_query =
"SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
"The \"reltime\" type has been removed in PostgreSQL version 12,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns, or change them to another data type, and restart\n"
"the upgrade.\n"),
.threshold_version = 1100
},
{
.status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
.report_filename = "tables_using_tinterval.txt",
.base_query =
"SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
"The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns, or change them to another data type, and restart\n"
"the upgrade.\n"),
.threshold_version = 1100
.report_filename = "tables_using_tinterval.txt",
.base_query =
"SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
.report_text =
gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
"The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
"so this cluster cannot currently be upgraded. You can drop the\n"
"problem columns, or change them to another data type, and restart\n"
"the upgrade.\n"),
.threshold_version = 1100
},
/* End of checks marker, must remain last */
@@ -334,7 +334,7 @@ static DataTypesUsageChecks data_types_usage_checks[] =
* there's no storage involved in a view.
*/
static void
check_for_data_types_usage(ClusterInfo *cluster, DataTypesUsageChecks * checks)
check_for_data_types_usage(ClusterInfo *cluster, DataTypesUsageChecks *checks)
{
bool found = false;
bool *results;
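To spell out how entries in this table are gated, here is a Perl rendering for illustration only (the real logic is the C code in check.c, and the constants below are stand-ins for the C macros): a numeric threshold_version is assumed to apply the check when the old cluster's major version is at or below it, ALL_VERSIONS always applies, and MANUAL_CHECK defers to the entry's version_hook.

use strict;
use warnings;

use constant { ALL_VERSIONS => -1, MANUAL_CHECK => -2 };

# Entries mirroring the table above; versions are encoded as in
# pg_upgrade, e.g. 903 = 9.3 and 1500 = 15.
my @checks = (
    { name => 'composite types', threshold_version => ALL_VERSIONS },
    { name => '"line" type', threshold_version => 903 },
    { name => '"aclitem" type', threshold_version => 1500 },
    {
        name => '"jsonb" 9.4-beta format',
        threshold_version => MANUAL_CHECK,
        version_hook => sub { my ($major) = @_; return $major == 904; },
    });

my $old_major = 1500;    # pretend we upgrade from PostgreSQL 15

foreach my $check (@checks)
{
    my $t = $check->{threshold_version};
    next if $t == MANUAL_CHECK && !$check->{version_hook}->($old_major);
    next if $t >= 0 && $old_major > $t;
    print "would run: $check->{name}\n";
}

With $old_major = 1500 this prints the composite-type and "aclitem" checks, matching the thresholds shown above.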

@@ -31,7 +31,8 @@ $newpub->init(allows_streaming => 'logical');
# completely till it is open. The probability of seeing this behavior is
# higher in this test because we use wal_level as logical via
# allows_streaming => 'logical' which in turn set shared_buffers as 1MB.
$newpub->append_conf('postgresql.conf', q{
$newpub->append_conf(
'postgresql.conf', q{
bgwriter_lru_maxpages = 0
checkpoint_timeout = 1h
});
@@ -81,7 +82,7 @@ command_checks_all(
[qr//],
'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
);
ok( -d $newpub->data_dir . "/pg_upgrade_output.d",
ok(-d $newpub->data_dir . "/pg_upgrade_output.d",
"pg_upgrade_output.d/ not removed after pg_upgrade failure");
# Set 'max_replication_slots' to match the number of slots (2) present on the

@@ -291,8 +291,7 @@ regress_sub5|f|f),
# Subscription relations should be preserved
$result = $new_sub->safe_psql('postgres',
"SELECT srrelid, srsubstate FROM pg_subscription_rel ORDER BY srrelid"
);
"SELECT srrelid, srsubstate FROM pg_subscription_rel ORDER BY srrelid");
is( $result, qq($tab_upgraded1_oid|r
$tab_upgraded2_oid|i),
"there should be 2 rows in pg_subscription_rel(representing tab_upgraded1 and tab_upgraded2)"

@@ -72,7 +72,8 @@ my @scenario = (
{
'name' => 'system_identifier',
'mutilate' => \&mutilate_system_identifier,
'fails_like' => qr/manifest system identifier is .*, but control file has/
'fails_like' =>
qr/manifest system identifier is .*, but control file has/
},
{
'name' => 'bad_manifest',
@@ -254,8 +255,9 @@ sub mutilate_system_identifier
$node->init(force_initdb => 1, allows_streaming => 1);
$node->start;
$node->backup('backup2');
move($node->backup_dir.'/backup2/backup_manifest', $backup_path.'/backup_manifest')
or BAIL_OUT "could not copy manifest to $backup_path";
move($node->backup_dir . '/backup2/backup_manifest',
$backup_path . '/backup_manifest')
or BAIL_OUT "could not copy manifest to $backup_path";
$node->teardown_node(fail_ok => 1);
return;
}

@@ -12,7 +12,8 @@ use Test::More;
my $tempdir = PostgreSQL::Test::Utils::tempdir;
test_bad_manifest('input string ended unexpectedly',
test_bad_manifest(
'input string ended unexpectedly',
qr/could not parse backup manifest: The input string ended unexpectedly/,
<<EOM);
{

@@ -12,21 +12,47 @@ program_version_ok('pg_waldump');
program_options_handling_ok('pg_waldump');
# wrong number of arguments
command_fails_like([ 'pg_waldump', ], qr/error: no arguments/, 'no arguments');
command_fails_like([ 'pg_waldump', 'foo', 'bar', 'baz' ], qr/error: too many command-line arguments/, 'too many arguments');
command_fails_like([ 'pg_waldump', ], qr/error: no arguments/,
'no arguments');
command_fails_like(
[ 'pg_waldump', 'foo', 'bar', 'baz' ],
qr/error: too many command-line arguments/,
'too many arguments');
# invalid option arguments
command_fails_like([ 'pg_waldump', '--block', 'bad' ], qr/error: invalid block number/, 'invalid block number');
command_fails_like([ 'pg_waldump', '--fork', 'bad' ], qr/error: invalid fork name/, 'invalid fork name');
command_fails_like([ 'pg_waldump', '--limit', 'bad' ], qr/error: invalid value/, 'invalid limit');
command_fails_like([ 'pg_waldump', '--relation', 'bad' ], qr/error: invalid relation/, 'invalid relation specification');
command_fails_like([ 'pg_waldump', '--rmgr', 'bad' ], qr/error: resource manager .* does not exist/, 'invalid rmgr name');
command_fails_like([ 'pg_waldump', '--start', 'bad' ], qr/error: invalid WAL location/, 'invalid start LSN');
command_fails_like([ 'pg_waldump', '--end', 'bad' ], qr/error: invalid WAL location/, 'invalid end LSN');
command_fails_like(
[ 'pg_waldump', '--block', 'bad' ],
qr/error: invalid block number/,
'invalid block number');
command_fails_like(
[ 'pg_waldump', '--fork', 'bad' ],
qr/error: invalid fork name/,
'invalid fork name');
command_fails_like(
[ 'pg_waldump', '--limit', 'bad' ],
qr/error: invalid value/,
'invalid limit');
command_fails_like(
[ 'pg_waldump', '--relation', 'bad' ],
qr/error: invalid relation/,
'invalid relation specification');
command_fails_like(
[ 'pg_waldump', '--rmgr', 'bad' ],
qr/error: resource manager .* does not exist/,
'invalid rmgr name');
command_fails_like(
[ 'pg_waldump', '--start', 'bad' ],
qr/error: invalid WAL location/,
'invalid start LSN');
command_fails_like(
[ 'pg_waldump', '--end', 'bad' ],
qr/error: invalid WAL location/,
'invalid end LSN');
# rmgr list: If you add one to the list, consider also adding a test
# case exercising the new rmgr below.
command_like([ 'pg_waldump', '--rmgr=list'], qr/^XLOG
command_like(
[ 'pg_waldump', '--rmgr=list' ], qr/^XLOG
Transaction
Storage
CLOG
@@ -53,7 +79,8 @@ LogicalMessage$/,
my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
$node->append_conf('postgresql.conf', q{
$node->append_conf(
'postgresql.conf', q{
autovacuum = off
checkpoint_timeout = 1h
@@ -66,9 +93,13 @@ wal_level=logical
});
$node->start;
my ($start_lsn, $start_walfile) = split /\|/, $node->safe_psql('postgres', q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())});
my ($start_lsn, $start_walfile) = split /\|/,
$node->safe_psql('postgres',
q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())}
);
$node->safe_psql('postgres', q{
$node->safe_psql(
'postgres', q{
-- heap, btree, hash, sequence
CREATE TABLE t1 (a int GENERATED ALWAYS AS IDENTITY, b text);
CREATE INDEX i1a ON t1 USING btree (a);
@@ -125,32 +156,75 @@ DROP DATABASE d1;
my $tblspc_path = PostgreSQL::Test::Utils::tempdir_short();
$node->safe_psql('postgres', qq{
$node->safe_psql(
'postgres', qq{
CREATE TABLESPACE ts1 LOCATION '$tblspc_path';
DROP TABLESPACE ts1;
});
my ($end_lsn, $end_walfile) = split /\|/, $node->safe_psql('postgres', q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())});
my ($end_lsn, $end_walfile) = split /\|/,
$node->safe_psql('postgres',
q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())}
);
my $default_ts_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_tablespace WHERE spcname = 'pg_default'});
my $postgres_db_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_database WHERE datname = 'postgres'});
my $rel_t1_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_class WHERE relname = 't1'});
my $rel_i1a_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_class WHERE relname = 'i1a'});
my $default_ts_oid = $node->safe_psql('postgres',
q{SELECT oid FROM pg_tablespace WHERE spcname = 'pg_default'});
my $postgres_db_oid = $node->safe_psql('postgres',
q{SELECT oid FROM pg_database WHERE datname = 'postgres'});
my $rel_t1_oid = $node->safe_psql('postgres',
q{SELECT oid FROM pg_class WHERE relname = 't1'});
my $rel_i1a_oid = $node->safe_psql('postgres',
q{SELECT oid FROM pg_class WHERE relname = 'i1a'});
$node->stop;
# various ways of specifying WAL range
command_fails_like([ 'pg_waldump', 'foo', 'bar' ], qr/error: could not locate WAL file "foo"/, 'start file not found');
command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile ], qr/./, 'runs with start segment specified');
command_fails_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, 'bar' ], qr/error: could not open file "bar"/, 'end file not found');
command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, $node->data_dir . '/pg_wal/' . $end_walfile ], qr/./, 'runs with start and end segment specified');
command_fails_like([ 'pg_waldump', '-p', $node->data_dir ], qr/error: no start WAL location given/, 'path option requires start location');
command_like([ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end', $end_lsn ], qr/./, 'runs with path option and start and end locations');
command_fails_like([ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ], qr/error: error in WAL record at/, 'falling off the end of the WAL results in an error');
command_fails_like(
[ 'pg_waldump', 'foo', 'bar' ],
qr/error: could not locate WAL file "foo"/,
'start file not found');
command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile ],
qr/./, 'runs with start segment specified');
command_fails_like(
[ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, 'bar' ],
qr/error: could not open file "bar"/,
'end file not found');
command_like(
[
'pg_waldump',
$node->data_dir . '/pg_wal/' . $start_walfile,
$node->data_dir . '/pg_wal/' . $end_walfile
],
qr/./,
'runs with start and end segment specified');
command_fails_like(
[ 'pg_waldump', '-p', $node->data_dir ],
qr/error: no start WAL location given/,
'path option requires start location');
command_like(
[
'pg_waldump', '-p', $node->data_dir, '--start',
$start_lsn, '--end', $end_lsn
],
qr/./,
'runs with path option and start and end locations');
command_fails_like(
[ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ],
qr/error: error in WAL record at/,
'falling off the end of the WAL results in an error');
command_like([ 'pg_waldump', '--quiet', $node->data_dir . '/pg_wal/' . $start_walfile ], qr/^$/, 'no output with --quiet option');
command_fails_like([ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ], qr/error: error in WAL record at/, 'errors are shown with --quiet');
command_like(
[
'pg_waldump', '--quiet',
$node->data_dir . '/pg_wal/' . $start_walfile
],
qr/^$/,
'no output with --quiet option');
command_fails_like(
[ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ],
qr/error: error in WAL record at/,
'errors are shown with --quiet');
# Test for: Display a message that we're skipping data if `from`
@@ -165,7 +239,9 @@ command_fails_like([ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start',
my (@cmd, $stdout, $stderr, $result);
@cmd = ( 'pg_waldump', '--start', $new_start, $node->data_dir . '/pg_wal/' . $start_walfile );
@cmd = (
'pg_waldump', '--start', $new_start,
$node->data_dir . '/pg_wal/' . $start_walfile);
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "runs with start segment and start LSN specified");
like($stderr, qr/first record is after/, 'info message printed');
@@ -181,7 +257,9 @@ sub test_pg_waldump
my (@cmd, $stdout, $stderr, $result, @lines);
@cmd = ('pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end', $end_lsn);
@cmd = (
'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end',
$end_lsn);
push @cmd, @opts;
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "pg_waldump @opts: runs ok");
@@ -216,10 +294,15 @@ is(grep(!/^rmgr: Btree/, @lines), 0, 'only Btree lines');
@lines = test_pg_waldump('--fork', 'init');
is(grep(!/fork init/, @lines), 0, 'only init fork lines');
@lines = test_pg_waldump('--relation', "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines), 0, 'only lines for selected relation');
@lines = test_pg_waldump('--relation',
"$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines),
0, 'only lines for selected relation');
@lines = test_pg_waldump('--relation', "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid", '--block', 1);
@lines =
test_pg_waldump('--relation',
"$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
'--block', 1);
is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block');

@@ -74,15 +74,15 @@ SELECT tli, start_lsn, end_lsn from pg_available_wal_summaries()
WHERE end_lsn > '$summarized_lsn'
EOM
my @lines = split(/\n/, $details);
is(0+@lines, 1, "got exactly one new WAL summary");
is(0 + @lines, 1, "got exactly one new WAL summary");
my ($tli, $start_lsn, $end_lsn) = split(/\|/, $lines[0]);
note("examining summary for TLI $tli from $start_lsn to $end_lsn");
# Reconstruct the full pathname for the WAL summary file.
my $filename = sprintf "%s/pg_wal/summaries/%08s%08s%08s%08s%08s.summary",
$node1->data_dir, $tli,
split(m@/@, $start_lsn),
split(m@/@, $end_lsn);
ok(-f $filename, "WAL summary file exists");
# Run pg_walsummary on it. We expect exactly two blocks to be modified,
@@ -92,6 +92,6 @@ note($stdout);
@lines = split(/\n/, $stdout);
like($stdout, qr/FORK main: block 0$/m, "stdout shows block 0 modified");
is($stderr, '', 'stderr is empty');
is(0+@lines, 2, "UPDATE modified 2 blocks");
is(0 + @lines, 2, "UPDATE modified 2 blocks");
done_testing();

@@ -1541,18 +1541,13 @@ $node->safe_psql('postgres', 'DROP TABLE first_client_table, xy;');
# Test --exit-on-abort
$node->safe_psql('postgres',
'CREATE TABLE counter(i int); '.
'INSERT INTO counter VALUES (0);'
);
'CREATE TABLE counter(i int); ' . 'INSERT INTO counter VALUES (0);');
$node->pgbench(
'-t 10 -c 2 -j 2 --exit-on-abort',
2,
[],
[
qr{division by zero},
qr{Run was aborted due to an error in thread}
],
[ qr{division by zero}, qr{Run was aborted due to an error in thread} ],
'test --exit-on-abort',
{
'001_exit_on_abort' => q{

@@ -370,7 +370,8 @@ psql_fails_like(
psql_like(
$node,
sprintf(q{with x as (
sprintf(
q{with x as (
select now()-backend_start AS howlong
from pg_stat_activity
where pid = pg_backend_pid()
@@ -416,20 +417,23 @@ psql_like($node, "SELECT 'one' \\g | $pipe_cmd", qr//, "one command \\g");
my $c1 = slurp_file($g_file);
like($c1, qr/one/);
psql_like($node, "SELECT 'two' \\; SELECT 'three' \\g | $pipe_cmd", qr//, "two commands \\g");
psql_like($node, "SELECT 'two' \\; SELECT 'three' \\g | $pipe_cmd",
qr//, "two commands \\g");
my $c2 = slurp_file($g_file);
like($c2, qr/two.*three/s);
psql_like($node, "\\set SHOW_ALL_RESULTS 0\nSELECT 'four' \\; SELECT 'five' \\g | $pipe_cmd", qr//,
"two commands \\g with only last result");
psql_like(
$node,
"\\set SHOW_ALL_RESULTS 0\nSELECT 'four' \\; SELECT 'five' \\g | $pipe_cmd",
qr//,
"two commands \\g with only last result");
my $c3 = slurp_file($g_file);
like($c3, qr/five/);
unlike($c3, qr/four/);
psql_like($node, "copy (values ('foo'),('bar')) to stdout \\g | $pipe_cmd",
qr//,
"copy output passed to \\g pipe");
qr//, "copy output passed to \\g pipe");
my $c4 = slurp_file($g_file);
like($c4, qr/foo.*bar/s);

@@ -25,13 +25,14 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'clusterdb', '-a' ],
'invalid database not targeted by clusterdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_clusterdb.pl as well.
$node->command_fails_like([ 'clusterdb', '-d', 'regression_invalid'],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'clusterdb cannot target invalid database');
$node->command_fails_like(
[ 'clusterdb', '-d', 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'clusterdb cannot target invalid database');
$node->safe_psql('postgres',
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'

@@ -38,6 +38,6 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'dropdb', 'regression_invalid' ],
'invalid database can be dropped');
done_testing();

@@ -44,12 +44,13 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'reindexdb', '-a' ],
'invalid database not targeted by reindexdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 090_reindexdb.pl as well.
$node->command_fails_like([ 'reindexdb', '-d', 'regression_invalid'],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'reindexdb cannot target invalid database');
$node->command_fails_like(
[ 'reindexdb', '-d', 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'reindexdb cannot target invalid database');
done_testing();

@@ -168,7 +168,10 @@ $node->issues_sql_like(
qr/^(?!.*VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar).*$/s,
'vacuumdb --exclude-schema');
$node->issues_sql_like(
[ 'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema', '"Bar"', 'postgres' ],
[
'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema',
'"Bar"', 'postgres'
],
qr/^(?!.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
| VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz).*$/sx,
'vacuumdb multiple --exclude-schema switches');

@@ -22,12 +22,13 @@ $node->safe_psql(
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'vacuumdb', '-a' ],
'invalid database not targeted by vacuumdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_vacuumdb.pl as well.
$node->command_fails_like([ 'vacuumdb', '-d', 'regression_invalid'],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'vacuumdb cannot target invalid database');
$node->command_fails_like(
[ 'vacuumdb', '-d', 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'vacuumdb cannot target invalid database');
done_testing();

@@ -173,7 +173,7 @@ typedef struct
{
size_t len;
char *prod;
} td_entry;
#define TD_ENTRY(PROD) { sizeof(PROD) - 1, (PROD) }
@@ -181,30 +181,30 @@ static td_entry td_parser_table[JSON_NUM_NONTERMINALS][JSON_NUM_TERMINALS] =
{
/* JSON */
[OFS(JSON_NT_JSON)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_SCALAR_STRING),
[OFS(JSON_NT_JSON)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_SCALAR_NUMBER),
[OFS(JSON_NT_JSON)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_SCALAR_TRUE),
[OFS(JSON_NT_JSON)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_SCALAR_FALSE),
[OFS(JSON_NT_JSON)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_SCALAR_NULL),
[OFS(JSON_NT_JSON)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY),
[OFS(JSON_NT_JSON)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_OBJECT),
/* ARRAY_ELEMENTS */
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
[OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
/* MORE_ARRAY_ELEMENTS */
[OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_ARRAY_ELEMENTS),
[OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
[OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_ARRAY_ELEMENTS),
[OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
/* KEY_PAIRS */
[OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_KEY_PAIRS),
[OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
[OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_KEY_PAIRS),
[OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
/* MORE_KEY_PAIRS */
[OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_KEY_PAIRS),
[OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
[OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_KEY_PAIRS),
[OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
};
/* the GOAL production. Not stored in the table, but will be the initial contents of the prediction stack */
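For readers skimming this hunk: the two-dimensional array above is an LL(1) prediction table, and the GOAL production named in the comment seeds a stack that drives the parse. Below is a minimal sketch of that technique with a toy grammar and hypothetical names; it is not PostgreSQL's actual parser code.

#include <stdio.h>
#include <string.h>

/* Toy grammar: S -> 'a' S | 'b'.  predict() plays the role of the
 * prediction table: map (nonterminal, lookahead) to a production. */
static const char *
predict(char nt, char lookahead)
{
	if (nt == 'S' && lookahead == 'a')
		return "aS";
	if (nt == 'S' && lookahead == 'b')
		return "b";
	return NULL;				/* empty table cell: syntax error */
}

static int
parse(const char *input)
{
	char		stack[64];
	int			top = 0;

	stack[top++] = 'S';			/* the GOAL symbol seeds the stack */
	while (top > 0)
	{
		char		sym = stack[--top];

		if (sym == *input)		/* terminal on top: consume input */
			input++;
		else
		{
			const char *prod = predict(sym, *input);

			if (prod == NULL)
				return 0;
			/* push the production right-to-left so that its first
			 * symbol is popped next */
			for (int i = (int) strlen(prod) - 1; i >= 0; i--)
				stack[top++] = prod[i];
		}
	}
	return *input == '\0';
}

int
main(void)
{
	printf("%d %d\n", parse("aab"), parse("aba"));	/* prints 1 0 */
	return 0;
}

A side effect of the designated-initializer layout in td_parser_table above is that unnamed cells default to zeroes, which a parser of this shape can treat as "no production, syntax error".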


@ -28,7 +28,7 @@ static size_t convert_case(char *dst, size_t dstsize, const char *src, ssize_t s
pg_wchar
unicode_lowercase_simple(pg_wchar code)
{
const pg_case_map *map = find_case_map(code);
const pg_case_map *map = find_case_map(code);
return map ? map->simplemap[CaseLower] : code;
}
@ -36,7 +36,7 @@ unicode_lowercase_simple(pg_wchar code)
pg_wchar
unicode_titlecase_simple(pg_wchar code)
{
const pg_case_map *map = find_case_map(code);
const pg_case_map *map = find_case_map(code);
return map ? map->simplemap[CaseTitle] : code;
}
@ -44,7 +44,7 @@ unicode_titlecase_simple(pg_wchar code)
pg_wchar
unicode_uppercase_simple(pg_wchar code)
{
const pg_case_map *map = find_case_map(code);
const pg_case_map *map = find_case_map(code);
return map ? map->simplemap[CaseUpper] : code;
}
@ -156,7 +156,7 @@ convert_case(char *dst, size_t dstsize, const char *src, ssize_t srclen,
{
pg_wchar u1 = utf8_to_unicode((unsigned char *) src + srcoff);
int u1len = unicode_utf8len(u1);
const pg_case_map *casemap = find_case_map(u1);
const pg_case_map *casemap = find_case_map(u1);
if (str_casekind == CaseTitle)
{
@ -210,7 +210,7 @@ find_case_map(pg_wchar ucs)
Assert(lengthof(case_map) >= 0x80);
if (ucs < 0x80)
{
const pg_case_map *map = &case_map[ucs];
const pg_case_map *map = &case_map[ucs];
Assert(map->codepoint == ucs);
return map;
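The pgindent change in this file only removes a stray space in the pointer declarations, but the surrounding code shows a two-tier codepoint lookup: a dense array below U+0080 for an O(1) ASCII fast path, with a search over sorted entries above that. A rough sketch of the pattern with toy data and hypothetical names (the real table layout and search differ):

#include <stdlib.h>

typedef struct
{
	unsigned int codepoint;
	unsigned int simplemap_lower;	/* toy: a single mapping */
} toy_case_map;

/* Entries below U+0080 are dense, so ASCII resolves with one array
 * index; everything above is sorted by codepoint for binary search. */
static const toy_case_map toy_dense[0x80];	/* would be fully populated */
static const toy_case_map toy_sparse[] = {
	{0x00C0, 0x00E0},			/* A-grave -> a-grave */
	{0x0100, 0x0101},			/* A-macron -> a-macron */
};

static int
toy_cmp(const void *key, const void *elem)
{
	unsigned int k = *(const unsigned int *) key;
	unsigned int c = ((const toy_case_map *) elem)->codepoint;

	return (k > c) - (k < c);
}

static const toy_case_map *
toy_find_case_map(unsigned int ucs)
{
	if (ucs < 0x80)
		return &toy_dense[ucs];	/* O(1) fast path, as Assert'd above */
	return bsearch(&ucs, toy_sparse,
				   sizeof(toy_sparse) / sizeof(toy_sparse[0]),
				   sizeof(toy_sparse[0]), toy_cmp);
}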


@ -75,7 +75,7 @@
#define PG_U_CHARACTER_TAB 0x09
static bool range_search(const pg_unicode_range * tbl, size_t size,
static bool range_search(const pg_unicode_range *tbl, size_t size,
pg_wchar code);
/*
@ -478,7 +478,7 @@ unicode_category_abbrev(pg_unicode_category category)
* given table.
*/
static bool
range_search(const pg_unicode_range * tbl, size_t size, pg_wchar code)
range_search(const pg_unicode_range *tbl, size_t size, pg_wchar code)
{
int min = 0;
int mid;
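The hunk cuts off right after range_search()'s local declarations. For context, an inclusive-range binary search of this shape typically looks like the following sketch; this is a generic reconstruction under assumptions, not the exact PostgreSQL code:

typedef struct
{
	unsigned int first;
	unsigned int last;
} toy_range;

/* Return 1 if code falls in any [first, last] range of the sorted,
 * non-overlapping table. */
static int
toy_range_search(const toy_range *tbl, int size, unsigned int code)
{
	int			min = 0;
	int			max = size - 1;

	while (max >= min)
	{
		int			mid = min + (max - min) / 2;

		if (code > tbl[mid].last)
			min = mid + 1;
		else if (code < tbl[mid].first)
			max = mid - 1;
		else
			return 1;			/* first <= code <= last */
	}
	return 0;
}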


@ -47,9 +47,10 @@
aggfinalfn => 'interval_avg', aggcombinefn => 'interval_avg_combine',
aggserialfn => 'interval_avg_serialize',
aggdeserialfn => 'interval_avg_deserialize',
aggmtransfn => 'interval_avg_accum', aggminvtransfn => 'interval_avg_accum_inv',
aggmfinalfn => 'interval_avg', aggtranstype => 'internal',
aggtransspace => '40', aggmtranstype => 'internal', aggmtransspace => '40' },
aggmtransfn => 'interval_avg_accum',
aggminvtransfn => 'interval_avg_accum_inv', aggmfinalfn => 'interval_avg',
aggtranstype => 'internal', aggtransspace => '40',
aggmtranstype => 'internal', aggmtransspace => '40' },
# sum
{ aggfnoid => 'sum(int8)', aggtransfn => 'int8_avg_accum',
@ -77,9 +78,10 @@
aggfinalfn => 'interval_sum', aggcombinefn => 'interval_avg_combine',
aggserialfn => 'interval_avg_serialize',
aggdeserialfn => 'interval_avg_deserialize',
aggmtransfn => 'interval_avg_accum', aggminvtransfn => 'interval_avg_accum_inv',
aggmfinalfn => 'interval_sum', aggtranstype => 'internal',
aggtransspace => '40', aggmtranstype => 'internal', aggmtransspace => '40'},
aggmtransfn => 'interval_avg_accum',
aggminvtransfn => 'interval_avg_accum_inv', aggmfinalfn => 'interval_sum',
aggtranstype => 'internal', aggtransspace => '40',
aggmtranstype => 'internal', aggmtransspace => '40' },
{ aggfnoid => 'sum(numeric)', aggtransfn => 'numeric_avg_accum',
aggfinalfn => 'numeric_sum', aggcombinefn => 'numeric_avg_combine',
aggserialfn => 'numeric_avg_serialize',


@ -30,7 +30,8 @@
descr => 'sorts using the Unicode Collation Algorithm with default settings',
collname => 'unicode', collprovider => 'i', collencoding => '-1',
colllocale => 'und' },
{ oid => '811', descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
{ oid => '811',
descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6',
colllocale => 'C.UTF-8', collversion => '1' },


@ -16,9 +16,9 @@
descr => 'default template for new databases',
datname => 'template1', encoding => 'ENCODING',
datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
datallowconn => 't', dathasloginevt => 'f', datconnlimit => '-1', datfrozenxid => '0',
datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE',
datctype => 'LC_CTYPE', datlocale => 'DATLOCALE',
datallowconn => 't', dathasloginevt => 'f', datconnlimit => '-1',
datfrozenxid => '0', datminmxid => '1', dattablespace => 'pg_default',
datcollate => 'LC_COLLATE', datctype => 'LC_CTYPE', datlocale => 'DATLOCALE',
daticurules => 'ICU_RULES', datacl => '_null_' },
]


@ -3383,12 +3383,12 @@
prosrc => 'drandom_normal' },
{ oid => '9719', descr => 'random integer in range',
proname => 'random', provolatile => 'v', proparallel => 'r',
prorettype => 'int4', proargtypes => 'int4 int4',
proargnames => '{min,max}', prosrc => 'int4random' },
prorettype => 'int4', proargtypes => 'int4 int4', proargnames => '{min,max}',
prosrc => 'int4random' },
{ oid => '9720', descr => 'random bigint in range',
proname => 'random', provolatile => 'v', proparallel => 'r',
prorettype => 'int8', proargtypes => 'int8 int8',
proargnames => '{min,max}', prosrc => 'int8random' },
prorettype => 'int8', proargtypes => 'int8 int8', proargnames => '{min,max}',
prosrc => 'int8random' },
{ oid => '9721', descr => 'random numeric in range',
proname => 'random', provolatile => 'v', proparallel => 'r',
prorettype => 'numeric', proargtypes => 'numeric numeric',
@ -4932,9 +4932,8 @@
prosrc => 'numeric_poly_stddev_samp' },
{ oid => '1843', descr => 'aggregate transition function',
proname => 'interval_avg_accum', proisstrict => 'f',
prorettype => 'internal', proargtypes => 'internal interval',
prosrc => 'interval_avg_accum' },
proname => 'interval_avg_accum', proisstrict => 'f', prorettype => 'internal',
proargtypes => 'internal interval', prosrc => 'interval_avg_accum' },
{ oid => '3325', descr => 'aggregate combine function',
proname => 'interval_avg_combine', proisstrict => 'f',
prorettype => 'internal', proargtypes => 'internal internal',
@ -5743,13 +5742,15 @@
prosrc => 'pg_stat_get_checkpointer_restartpoints_timed' },
{ oid => '8744',
descr => 'statistics: number of backend requested restartpoints started by the checkpointer',
proname => 'pg_stat_get_checkpointer_restartpoints_requested', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => '',
proname => 'pg_stat_get_checkpointer_restartpoints_requested',
provolatile => 's', proparallel => 'r', prorettype => 'int8',
proargtypes => '',
prosrc => 'pg_stat_get_checkpointer_restartpoints_requested' },
{ oid => '8745',
descr => 'statistics: number of backend performed restartpoints',
proname => 'pg_stat_get_checkpointer_restartpoints_performed', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => '',
proname => 'pg_stat_get_checkpointer_restartpoints_performed',
provolatile => 's', proparallel => 'r', prorettype => 'int8',
proargtypes => '',
prosrc => 'pg_stat_get_checkpointer_restartpoints_performed' },
{ oid => '2771',
descr => 'statistics: number of buffers written during checkpoints and restartpoints',
@ -7466,8 +7467,9 @@
proname => 'pg_column_compression', provolatile => 's', prorettype => 'text',
proargtypes => 'any', prosrc => 'pg_column_compression' },
{ oid => '8393', descr => 'chunk ID of on-disk TOASTed value',
proname => 'pg_column_toast_chunk_id', provolatile => 's', prorettype => 'oid',
proargtypes => 'any', prosrc => 'pg_column_toast_chunk_id' },
proname => 'pg_column_toast_chunk_id', provolatile => 's',
prorettype => 'oid', proargtypes => 'any',
prosrc => 'pg_column_toast_chunk_id' },
{ oid => '2322',
descr => 'total disk space usage for the specified tablespace',
proname => 'pg_tablespace_size', provolatile => 'v', prorettype => 'int8',
@ -8837,8 +8839,8 @@
proname => 'text', prorettype => 'text', proargtypes => 'xml',
prosrc => 'xmltotext' },
{ oid => '3813', descr => 'generate XML text node',
proname => 'xmltext', proisstrict => 't', prorettype => 'xml',
proargtypes => 'text', prosrc => 'xmltext' },
proname => 'xmltext', prorettype => 'xml', proargtypes => 'text',
prosrc => 'xmltext' },
{ oid => '2923', descr => 'map table contents to XML',
proname => 'table_to_xml', procost => '100', provolatile => 's',
@ -10054,8 +10056,8 @@
prorettype => 'anyelement', proargtypes => 'anyelement jsonb',
prosrc => 'jsonb_populate_record' },
{ oid => '9558', descr => 'test get record fields from a jsonb object',
proname => 'jsonb_populate_record_valid', proisstrict => 'f', provolatile => 's',
prorettype => 'bool', proargtypes => 'anyelement jsonb',
proname => 'jsonb_populate_record_valid', proisstrict => 'f',
provolatile => 's', prorettype => 'bool', proargtypes => 'anyelement jsonb',
prosrc => 'jsonb_populate_record_valid' },
{ oid => '3475',
descr => 'get set of records with fields from a jsonb array of objects',
@ -11233,9 +11235,10 @@
proname => 'pg_logical_emit_message', provolatile => 'v', proparallel => 'u',
prorettype => 'pg_lsn', proargtypes => 'bool text bytea bool',
prosrc => 'pg_logical_emit_message_bytea' },
{ oid => '9929', descr => 'sync replication slots from the primary to the standby',
proname => 'pg_sync_replication_slots', provolatile => 'v', proparallel => 'u',
prorettype => 'void', proargtypes => '',
{ oid => '9929',
descr => 'sync replication slots from the primary to the standby',
proname => 'pg_sync_replication_slots', provolatile => 'v',
proparallel => 'u', prorettype => 'void', proargtypes => '',
prosrc => 'pg_sync_replication_slots' },
# event triggers
@ -11447,7 +11450,8 @@
proname => 'binary_upgrade_logical_slot_has_caught_up', provolatile => 'v',
proparallel => 'u', prorettype => 'bool', proargtypes => 'name',
prosrc => 'binary_upgrade_logical_slot_has_caught_up' },
{ oid => '8404', descr => 'for use by pg_upgrade (relation for pg_subscription_rel)',
{ oid => '8404',
descr => 'for use by pg_upgrade (relation for pg_subscription_rel)',
proname => 'binary_upgrade_add_sub_rel_state', proisstrict => 'f',
provolatile => 'v', proparallel => 'u', prorettype => 'void',
proargtypes => 'text oid char pg_lsn',
@ -11455,8 +11459,7 @@
{ oid => '8405', descr => 'for use by pg_upgrade (remote_lsn for origin)',
proname => 'binary_upgrade_replorigin_advance', proisstrict => 'f',
provolatile => 'v', proparallel => 'u', prorettype => 'void',
proargtypes => 'text pg_lsn',
prosrc => 'binary_upgrade_replorigin_advance' },
proargtypes => 'text pg_lsn', prosrc => 'binary_upgrade_replorigin_advance' },
# conversion functions
{ oid => '4302',
@ -12161,38 +12164,30 @@
proname => 'any_value_transfn', prorettype => 'anyelement',
proargtypes => 'anyelement anyelement', prosrc => 'any_value_transfn' },
{ oid => '8436',
descr => 'list of available WAL summary files',
proname => 'pg_available_wal_summaries', prorows => '100',
proretset => 't', provolatile => 'v', proparallel => 's',
prorettype => 'record', proargtypes => '',
proallargtypes => '{int8,pg_lsn,pg_lsn}',
proargmodes => '{o,o,o}',
{ oid => '8436', descr => 'list of available WAL summary files',
proname => 'pg_available_wal_summaries', prorows => '100', proretset => 't',
provolatile => 'v', prorettype => 'record', proargtypes => '',
proallargtypes => '{int8,pg_lsn,pg_lsn}', proargmodes => '{o,o,o}',
proargnames => '{tli,start_lsn,end_lsn}',
prosrc => 'pg_available_wal_summaries' },
{ oid => '8437',
descr => 'contents of a WAL summary file',
proname => 'pg_wal_summary_contents', prorows => '100',
proretset => 't', provolatile => 'v', proparallel => 's',
prorettype => 'record', proargtypes => 'int8 pg_lsn pg_lsn',
{ oid => '8437', descr => 'contents of a WAL summary file',
proname => 'pg_wal_summary_contents', prorows => '100', proretset => 't',
provolatile => 'v', prorettype => 'record',
proargtypes => 'int8 pg_lsn pg_lsn',
proallargtypes => '{int8,pg_lsn,pg_lsn,oid,oid,oid,int2,int8,bool}',
proargmodes => '{i,i,i,o,o,o,o,o,o}',
proargnames => '{tli,start_lsn,end_lsn,relfilenode,reltablespace,reldatabase,relforknumber,relblocknumber,is_limit_block}',
prosrc => 'pg_wal_summary_contents' },
{ oid => '8438',
descr => 'WAL summarizer state',
proname => 'pg_get_wal_summarizer_state',
provolatile => 'v', proparallel => 's',
{ oid => '8438', descr => 'WAL summarizer state',
proname => 'pg_get_wal_summarizer_state', provolatile => 'v',
prorettype => 'record', proargtypes => '',
proallargtypes => '{int8,pg_lsn,pg_lsn,int4}',
proargmodes => '{o,o,o,o}',
proallargtypes => '{int8,pg_lsn,pg_lsn,int4}', proargmodes => '{o,o,o,o}',
proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}',
prosrc => 'pg_get_wal_summarizer_state' },
# GiST stratnum implementations
{ oid => '8047', descr => 'GiST support',
proname => 'gist_stratnum_identity', prorettype => 'int2',
proargtypes => 'int2',
prosrc => 'gist_stratnum_identity' },
proargtypes => 'int2', prosrc => 'gist_stratnum_identity' },
]


@ -206,7 +206,8 @@
typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G',
typinput => 'poly_in', typoutput => 'poly_out', typreceive => 'poly_recv',
typsend => 'poly_send', typalign => 'd', typstorage => 'x' },
{ oid => '628', array_type_oid => '629', descr => 'geometric line, formats \'{A,B,C}\'/\'[point1,point2]\'',
{ oid => '628', array_type_oid => '629',
descr => 'geometric line, formats \'{A,B,C}\'/\'[point1,point2]\'',
typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G',
typsubscript => 'raw_array_subscript_handler', typelem => 'float8',
typinput => 'line_in', typoutput => 'line_out', typreceive => 'line_recv',
@ -633,9 +634,8 @@
typoutput => 'tsm_handler_out', typreceive => '-', typsend => '-',
typalign => 'i' },
{ oid => '269',
typname => 'table_am_handler',
descr => 'pseudo-type for the result of a table AM handler function',
typlen => '4', typbyval => 't', typtype => 'p',
typname => 'table_am_handler', typlen => '4', typbyval => 't', typtype => 'p',
typcategory => 'P', typinput => 'table_am_handler_in',
typoutput => 'table_am_handler_out', typreceive => '-', typsend => '-',
typalign => 'i' },
@ -687,7 +687,8 @@
typoutput => 'brin_bloom_summary_out',
typreceive => 'brin_bloom_summary_recv', typsend => 'brin_bloom_summary_send',
typalign => 'i', typstorage => 'x', typcollation => 'default' },
{ oid => '4601', descr => 'pseudo-type representing BRIN minmax-multi summary',
{ oid => '4601',
descr => 'pseudo-type representing BRIN minmax-multi summary',
typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f',
typcategory => 'Z', typinput => 'brin_minmax_multi_summary_in',
typoutput => 'brin_minmax_multi_summary_out',


@ -26,13 +26,13 @@ typedef enum
CaseTitle = 1,
CaseUpper = 2,
NCaseKind
} CaseKind;
} CaseKind;
typedef struct
{
pg_wchar codepoint; /* Unicode codepoint */
pg_wchar simplemap[NCaseKind];
} pg_case_map;
} pg_case_map;
/*
* Case mapping table. Dense for codepoints < 0x80 (enabling fast lookup),


@ -23,19 +23,19 @@ typedef struct
uint32 first; /* Unicode codepoint */
uint32 last; /* Unicode codepoint */
uint8 category; /* General Category */
} pg_category_range;
} pg_category_range;
typedef struct
{
uint32 first; /* Unicode codepoint */
uint32 last; /* Unicode codepoint */
} pg_unicode_range;
} pg_unicode_range;
typedef struct
{
uint8 category;
uint8 properties;
} pg_unicode_properties;
} pg_unicode_properties;
/*
* The properties currently used, in no particular order. Fits in a uint8, but
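(The comment is truncated by the hunk boundary; its point is that all the boolean character properties pack into the uint8 bitmask of pg_unicode_properties above.) A minimal sketch of that packing, with hypothetical flag names:

#include <stdint.h>

/* Hypothetical property flags; each costs one bit of the uint8. */
#define TOY_PROP_ALPHABETIC		(1 << 0)
#define TOY_PROP_UPPERCASE		(1 << 1)
#define TOY_PROP_WHITESPACE		(1 << 2)

static inline int
toy_has_prop(uint8_t properties, uint8_t prop)
{
	return (properties & prop) != 0;	/* one AND per membership test */
}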


@ -86,7 +86,8 @@ if (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\blibpq_encryption\b/)
# Only run the GSSAPI tests when compiled with GSSAPI support and
# PG_TEST_EXTRA includes 'kerberos'
my $gss_supported = $ENV{with_gssapi} eq 'yes';
my $kerberos_enabled = $ENV{PG_TEST_EXTRA} && $ENV{PG_TEST_EXTRA} =~ /\bkerberos\b/;
my $kerberos_enabled =
$ENV{PG_TEST_EXTRA} && $ENV{PG_TEST_EXTRA} =~ /\bkerberos\b/;
my $ssl_supported = $ENV{with_ssl} eq 'openssl';
###
@ -127,7 +128,8 @@ if ($gss_supported != 0 && $kerberos_enabled != 0)
my $realm = 'EXAMPLE.COM';
$krb = PostgreSQL::Test::Kerberos->new($host, $hostaddr, $realm);
$node->append_conf('postgresql.conf', "krb_server_keyfile = '$krb->{keytab}'\n");
$node->append_conf('postgresql.conf',
"krb_server_keyfile = '$krb->{keytab}'\n");
}
if ($ssl_supported != 0)
@ -159,7 +161,8 @@ chomp($unixdir);
# Helper function that returns the encryption method in use in the
# connection.
$node->safe_psql('postgres', q{
$node->safe_psql(
'postgres', q{
CREATE FUNCTION current_enc() RETURNS text LANGUAGE plpgsql AS $$
DECLARE
ssl_in_use bool;
@ -206,7 +209,8 @@ $node->reload;
# Ok, all prepared. Run the tests.
my @all_test_users = ('testuser', 'ssluser', 'nossluser', 'gssuser', 'nogssuser');
my @all_test_users =
('testuser', 'ssluser', 'nossluser', 'gssuser', 'nogssuser');
my @all_gssencmodes = ('disable', 'prefer', 'require');
my @all_sslmodes = ('disable', 'allow', 'prefer', 'require');
my @all_sslnegotiations = ('postgres', 'direct', 'requiredirect');
@ -220,7 +224,8 @@ my $server_config = {
### Run tests with GSS and SSL disabled in the server
###
my $test_table;
if ($ssl_supported) {
if ($ssl_supported)
{
$test_table = q{
# USER GSSENCMODE SSLMODE SSLNEGOTIATION EVENTS -> OUTCOME
testuser disable disable * connect, authok -> plain
@ -240,7 +245,9 @@ testuser disable disable * connect, authok
. . . direct connect, directsslreject, reconnect, sslreject -> fail
. . . requiredirect connect, directsslreject -> fail
};
} else {
}
else
{
# Compiled without SSL support
$test_table = q{
# USER GSSENCMODE SSLMODE SSLNEGOTIATION EVENTS -> OUTCOME
@ -268,8 +275,8 @@ testuser require * * - -> fail
note("Running tests with SSL and GSS disabled in the server");
test_matrix($node, $server_config,
['testuser'], \@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
parse_table($test_table));
['testuser'], \@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
parse_table($test_table));
###
@ -317,10 +324,11 @@ nossluser . disable * connect, authok
$server_config->{server_ssl} = 1;
note("Running tests with SSL enabled in server");
test_matrix($node, $server_config,
['testuser', 'ssluser', 'nossluser'],
['disable'], \@all_sslmodes, \@all_sslnegotiations,
parse_table($test_table));
test_matrix(
$node, $server_config,
[ 'testuser', 'ssluser', 'nossluser' ], ['disable'],
\@all_sslmodes, \@all_sslnegotiations,
parse_table($test_table));
# Disable SSL again
$node->adjust_conf('postgresql.conf', 'ssl', 'off');
@ -399,17 +407,20 @@ nogssuser disable disable * connect, authok
# even connecting to the server. Skip those, because we tested
# them earlier already.
my ($sslmodes, $sslnegotiations);
if ($ssl_supported != 0) {
($sslmodes, $sslnegotiations) = (\@all_sslmodes, \@all_sslnegotiations);
} else {
if ($ssl_supported != 0)
{
($sslmodes, $sslnegotiations) =
(\@all_sslmodes, \@all_sslnegotiations);
}
else
{
($sslmodes, $sslnegotiations) = (['disable'], ['postgres']);
}
note("Running tests with GSS enabled in server");
test_matrix($node, $server_config,
['testuser', 'gssuser', 'nogssuser'],
\@all_gssencmodes, $sslmodes, $sslnegotiations,
parse_table($test_table));
test_matrix($node, $server_config, [ 'testuser', 'gssuser', 'nogssuser' ],
\@all_gssencmodes, $sslmodes, $sslnegotiations,
parse_table($test_table));
}
###
@ -422,7 +433,10 @@ SKIP:
skip "kerberos not enabled in PG_TEST_EXTRA" if $kerberos_enabled == 0;
# Sanity check that GSSAPI is still enabled from previous test.
connect_test($node, 'user=testuser gssencmode=prefer sslmode=prefer', 'connect, gssaccept, authok -> gss');
connect_test(
$node,
'user=testuser gssencmode=prefer sslmode=prefer',
'connect, gssaccept, authok -> gss');
# Enable SSL
$node->adjust_conf('postgresql.conf', 'ssl', 'on');
@ -528,10 +542,14 @@ nossluser disable disable * connect, authok
};
note("Running tests with both GSS and SSL enabled in server");
test_matrix($node, $server_config,
['testuser', 'gssuser', 'ssluser', 'nogssuser', 'nossluser'],
\@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
parse_table($test_table));
test_matrix(
$node,
$server_config,
[ 'testuser', 'gssuser', 'ssluser', 'nogssuser', 'nossluser' ],
\@all_gssencmodes,
\@all_sslmodes,
\@all_sslnegotiations,
parse_table($test_table));
}
###
@ -543,8 +561,13 @@ SKIP:
# libpq doesn't attempt SSL or GSSAPI over Unix domain
# sockets. The server would reject them too.
connect_test($node, "user=localuser gssencmode=prefer sslmode=prefer host=$unixdir", 'connect, authok -> plain');
connect_test($node, "user=localuser gssencmode=require sslmode=prefer host=$unixdir", '- -> fail');
connect_test(
$node,
"user=localuser gssencmode=prefer sslmode=prefer host=$unixdir",
'connect, authok -> plain');
connect_test($node,
"user=localuser gssencmode=require sslmode=prefer host=$unixdir",
'- -> fail');
}
done_testing();
@ -558,7 +581,8 @@ sub test_matrix
local $Test::Builder::Level = $Test::Builder::Level + 1;
my ($pg_node, $node_conf,
$test_users, $gssencmodes, $sslmodes, $sslnegotiations, %expected) = @_;
$test_users, $gssencmodes, $sslmodes, $sslnegotiations, %expected)
= @_;
foreach my $test_user (@{$test_users})
{
@ -572,10 +596,15 @@ sub test_matrix
{
$key = "$test_user $gssencmode $client_mode $negotiation";
$expected_events = $expected{$key};
if (!defined($expected_events)) {
$expected_events = "<line missing from expected output table>";
if (!defined($expected_events))
{
$expected_events =
"<line missing from expected output table>";
}
connect_test($pg_node, "user=$test_user gssencmode=$gssencmode sslmode=$client_mode sslnegotiation=$negotiation", $expected_events);
connect_test(
$pg_node,
"user=$test_user gssencmode=$gssencmode sslmode=$client_mode sslnegotiation=$negotiation",
$expected_events);
}
}
}
@ -594,7 +623,8 @@ sub connect_test
my $connstr_full = "";
$connstr_full .= "dbname=postgres " unless $connstr =~ m/dbname=/;
$connstr_full .= "host=$host hostaddr=$hostaddr " unless $connstr =~ m/host=/;
$connstr_full .= "host=$host hostaddr=$hostaddr "
unless $connstr =~ m/host=/;
$connstr_full .= $connstr;
# Get the current size of the logfile before running the test.
@ -614,7 +644,7 @@ sub connect_test
my ($ret, $stdout, $stderr) = $node->psql(
'postgres',
'',
extra_params => ['-w', '-c', 'SELECT current_enc()'],
extra_params => [ '-w', '-c', 'SELECT current_enc()' ],
connstr => "$connstr_full",
on_error_stop => 0);
@ -628,7 +658,8 @@ sub connect_test
# Check that the events and outcome match the expected events and
# outcome
my $events_and_outcome = join(', ', @events) . " -> $outcome";
is($events_and_outcome, $expected_events_and_outcome, $test_name) or diag("$stderr");
is($events_and_outcome, $expected_events_and_outcome, $test_name)
or diag("$stderr");
}
# Parse a test table. See comment at top of the file for the format.
@ -640,7 +671,8 @@ sub parse_table
my %expected;
my ($user, $gssencmode, $sslmode, $sslnegotiation);
foreach my $line (@lines) {
foreach my $line (@lines)
{
# Trim comments
$line =~ s/#.*$//;
@ -652,7 +684,8 @@ sub parse_table
# Ignore empty lines (includes comment-only lines)
next if $line eq '';
$line =~ m/^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S.*)\s*->\s*(\S+)\s*$/ or die "could not parse line \"$line\"";
$line =~ m/^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S.*)\s*->\s*(\S+)\s*$/
or die "could not parse line \"$line\"";
$user = $1 unless $1 eq ".";
$gssencmode = $2 unless $2 eq ".";
$sslmode = $3 unless $3 eq ".";
@ -662,10 +695,12 @@ sub parse_table
my @events = split /,\s*/, $5;
my $outcome = $6;
my $events_str = join(', ', @events);
$events_str =~ s/\s+$//; # trim whitespace
$events_str =~ s/\s+$//; # trim whitespace
my $events_and_outcome = "$events_str -> $outcome";
my %expanded = expand_expected_line($user, $gssencmode, $sslmode, $sslnegotiation, $events_and_outcome);
my %expanded =
expand_expected_line($user, $gssencmode, $sslmode, $sslnegotiation,
$events_and_outcome);
%expected = (%expected, %expanded);
}
return %expected;
@ -677,23 +712,48 @@ sub expand_expected_line
my ($user, $gssencmode, $sslmode, $sslnegotiation, $expected) = @_;
my %result;
if ($user eq '*') {
foreach my $x (@all_test_users) {
%result = (%result, expand_expected_line($x, $gssencmode, $sslmode, $sslnegotiation, $expected));
if ($user eq '*')
{
foreach my $x (@all_test_users)
{
%result = (
%result,
expand_expected_line(
$x, $gssencmode, $sslmode, $sslnegotiation, $expected));
}
} elsif ($gssencmode eq '*') {
foreach my $x (@all_gssencmodes) {
%result = (%result, expand_expected_line($user, $x, $sslmode, $sslnegotiation, $expected));
}
elsif ($gssencmode eq '*')
{
foreach my $x (@all_gssencmodes)
{
%result = (
%result,
expand_expected_line(
$user, $x, $sslmode, $sslnegotiation, $expected));
}
} elsif ($sslmode eq '*') {
foreach my $x (@all_sslmodes) {
%result = (%result, expand_expected_line($user, $gssencmode, $x, $sslnegotiation, $expected));
}
elsif ($sslmode eq '*')
{
foreach my $x (@all_sslmodes)
{
%result = (
%result,
expand_expected_line(
$user, $gssencmode, $x, $sslnegotiation, $expected));
}
} elsif ($sslnegotiation eq '*') {
foreach my $x (@all_sslnegotiations) {
%result = (%result, expand_expected_line($user, $gssencmode, $sslmode, $x, $expected));
}
elsif ($sslnegotiation eq '*')
{
foreach my $x (@all_sslnegotiations)
{
%result = (
%result,
expand_expected_line(
$user, $gssencmode, $sslmode, $x, $expected));
}
} else {
}
else
{
$result{"$user $gssencmode $sslmode $sslnegotiation"} = $expected;
}
return %result;
@ -708,13 +768,18 @@ sub parse_log_events
my @events = ();
my @lines = split /\n/, $log_contents;
foreach my $line (@lines) {
push @events, "reconnect" if $line =~ /connection received/ && scalar(@events) > 0;
push @events, "connect" if $line =~ /connection received/ && scalar(@events) == 0;
foreach my $line (@lines)
{
push @events, "reconnect"
if $line =~ /connection received/ && scalar(@events) > 0;
push @events, "connect"
if $line =~ /connection received/ && scalar(@events) == 0;
push @events, "sslaccept" if $line =~ /SSLRequest accepted/;
push @events, "sslreject" if $line =~ /SSLRequest rejected/;
push @events, "directsslaccept" if $line =~ /direct SSL connection accepted/;
push @events, "directsslreject" if $line =~ /direct SSL connection rejected/;
push @events, "directsslaccept"
if $line =~ /direct SSL connection accepted/;
push @events, "directsslreject"
if $line =~ /direct SSL connection rejected/;
push @events, "gssaccept" if $line =~ /GSSENCRequest accepted/;
push @events, "gssreject" if $line =~ /GSSENCRequest rejected/;
push @events, "authfail" if $line =~ /no pg_hba.conf entry/;
@ -722,8 +787,9 @@ sub parse_log_events
}
# No events at all is represented by "-"
if (scalar @events == 0) {
push @events, "-"
if (scalar @events == 0)
{
push @events, "-";
}
return @events;


@ -92,7 +92,7 @@ pg_popcount_masked_avx512(const char *buf, int bytes, bits8 mask)
const char *final;
int tail_idx;
__mmask64 bmask = ~UINT64CONST(0);
const __m512i maskv = _mm512_set1_epi8(mask);
const __m512i maskv = _mm512_set1_epi8(mask);
/*
* Align buffer down to avoid double load overhead from unaligned access.
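For orientation, the reference semantics of a masked popcount are simply "count the set bits of buf[i] & mask over the buffer". A scalar sketch with a hypothetical name follows; the AVX-512 routine above computes the same result 64 bytes at a time, which is why the truncated comment worries about load alignment:

#include <stddef.h>
#include <stdint.h>

typedef uint8_t bits8;

static uint64_t
toy_popcount_masked(const char *buf, size_t bytes, bits8 mask)
{
	uint64_t	popcnt = 0;

	for (size_t i = 0; i < bytes; i++)
	{
		uint8_t		b = (uint8_t) buf[i] & mask;

		while (b)
		{
			b &= b - 1;			/* clear the lowest set bit */
			popcnt++;
		}
	}
	return popcnt;
}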


@ -180,7 +180,8 @@ my %pgdump_runs = (
# (undumped) extension tables
privileged_internals => {
dump_cmd => [
'pg_dump', '--no-sync', "--file=$tempdir/privileged_internals.sql",
'pg_dump', '--no-sync',
"--file=$tempdir/privileged_internals.sql",
# these two tables are irrelevant to the test case
'--exclude-table=regress_pg_dump_schema.external_tab',
'--exclude-table=regress_pg_dump_schema.extdependtab',
@ -222,15 +223,18 @@ my %pgdump_runs = (
},
exclude_extension => {
dump_cmd => [
'pg_dump', '--no-sync', "--file=$tempdir/exclude_extension.sql",
'pg_dump', '--no-sync',
"--file=$tempdir/exclude_extension.sql",
'--exclude-extension=test_pg_dump', 'postgres',
],
},
exclude_extension_filter => {
dump_cmd => [
'pg_dump', '--no-sync',
'pg_dump',
'--no-sync',
"--file=$tempdir/exclude_extension_filter.sql",
"--filter=$tempdir/exclude_extension_filter.txt", 'postgres',
"--filter=$tempdir/exclude_extension_filter.txt",
'postgres',
],
},


@ -112,7 +112,7 @@ static rt_node_class_test_elem rt_node_class_tests[] =
* Return the number of keys in the radix tree.
*/
static uint64
rt_num_entries(rt_radix_tree * tree)
rt_num_entries(rt_radix_tree *tree)
{
return tree->ctl->num_keys;
}
@ -209,7 +209,7 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc)
* false.
*/
for (int i = 0; i < children; i++)
EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) & keys[i]));
EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
rt_stats(radixtree);
@ -231,14 +231,14 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc)
TestValueType update = keys[i] + 1;
/* rt_set should report the key found */
EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) & update));
EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) &update));
}
/* delete and re-insert keys */
for (int i = 0; i < children; i++)
{
EXPECT_TRUE(rt_delete(radixtree, keys[i]));
EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) & keys[i]));
EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
}
/* look up keys after deleting and re-inserting */
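The EXPECT_FALSE/EXPECT_TRUE assertions above exercise rt_set()'s return contract: false when the key is newly inserted, true when an existing entry was found and updated. A toy stand-in showing the same contract (array-backed with hypothetical names; the real structure is a radix tree):

#include <assert.h>
#include <stdbool.h>

typedef struct
{
	unsigned long key;
	int			value;
} toy_pair;

typedef struct
{
	toy_pair	pairs[256];
	int			n;
} toy_map;

static bool
toy_set(toy_map *m, unsigned long key, int value)
{
	for (int i = 0; i < m->n; i++)
	{
		if (m->pairs[i].key == key)
		{
			m->pairs[i].value = value;	/* update in place */
			return true;		/* key was already present */
		}
	}
	assert(m->n < 256);			/* toy capacity limit */
	m->pairs[m->n].key = key;
	m->pairs[m->n].value = value;
	m->n++;
	return false;				/* newly inserted */
}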


@ -838,20 +838,20 @@ sub init_from_backup
my $data_path = $self->data_dir;
if (defined $params{combine_with_prior})
{
my @prior_backups = @{$params{combine_with_prior}};
my @prior_backups = @{ $params{combine_with_prior} };
my @prior_backup_path;
for my $prior_backup_name (@prior_backups)
{
push @prior_backup_path,
$root_node->backup_dir . '/' . $prior_backup_name;
$root_node->backup_dir . '/' . $prior_backup_name;
}
local %ENV = $self->_get_env();
my @combineargs = ('pg_combinebackup', '-d');
if (exists $params{tablespace_map})
{
while (my ($olddir, $newdir) = each %{$params{tablespace_map}})
while (my ($olddir, $newdir) = each %{ $params{tablespace_map} })
{
push @combineargs, "-T$olddir=$newdir";
}
@ -872,24 +872,25 @@ sub init_from_backup
# We need to generate a tablespace_map file.
open(my $tsmap, ">", "$data_path/tablespace_map")
|| die "$data_path/tablespace_map: $!";
|| die "$data_path/tablespace_map: $!";
# Extract tarfiles and add tablespace_map entries
my @tstars = grep { /^\d+.tar/ }
PostgreSQL::Test::Utils::slurp_dir($backup_path);
PostgreSQL::Test::Utils::slurp_dir($backup_path);
for my $tstar (@tstars)
{
my $tsoid = $tstar;
$tsoid =~ s/\.tar$//;
die "no tablespace mapping for $tstar"
if !exists $params{tablespace_map} ||
!exists $params{tablespace_map}{$tsoid};
if !exists $params{tablespace_map}
|| !exists $params{tablespace_map}{$tsoid};
my $newdir = $params{tablespace_map}{$tsoid};
mkdir($newdir) || die "mkdir $newdir: $!";
PostgreSQL::Test::Utils::system_or_bail($params{tar_program}, 'xf',
$backup_path . '/' . $tstar, '-C', $newdir);
PostgreSQL::Test::Utils::system_or_bail($params{tar_program},
'xf', $backup_path . '/' . $tstar,
'-C', $newdir);
my $escaped_newdir = $newdir;
$escaped_newdir =~ s/\\/\\\\/g;
@ -906,11 +907,13 @@ sub init_from_backup
# Copy the main backup. If we see a tablespace directory for which we
# have a tablespace mapping, skip it, but remember that we saw it.
PostgreSQL::Test::RecursiveCopy::copypath($backup_path, $data_path,
PostgreSQL::Test::RecursiveCopy::copypath(
$backup_path,
$data_path,
'filterfn' => sub {
my ($path) = @_;
if ($path =~ /^pg_tblspc\/(\d+)$/ &&
exists $params{tablespace_map}{$1})
if ($path =~ /^pg_tblspc\/(\d+)$/
&& exists $params{tablespace_map}{$1})
{
push @tsoids, $1;
return 0;
@ -922,14 +925,14 @@ sub init_from_backup
{
# We need to generate a tablespace_map file.
open(my $tsmap, ">", "$data_path/tablespace_map")
|| die "$data_path/tablespace_map: $!";
|| die "$data_path/tablespace_map: $!";
# Now use the list of tablespace links to copy each tablespace.
for my $tsoid (@tsoids)
{
die "no tablespace mapping for $tsoid"
if !exists $params{tablespace_map} ||
!exists $params{tablespace_map}{$tsoid};
if !exists $params{tablespace_map}
|| !exists $params{tablespace_map}{$tsoid};
my $olddir = $backup_path . '/pg_tblspc/' . $tsoid;
my $newdir = $params{tablespace_map}{$tsoid};
@ -1166,9 +1169,8 @@ sub restart
# -w is now the default but having it here does no harm and helps
# compatibility with older versions.
$ret = PostgreSQL::Test::Utils::system_log(
'pg_ctl', '-w', '-D', $self->data_dir,
'-l', $self->logfile, 'restart');
$ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D',
$self->data_dir, '-l', $self->logfile, 'restart');
if ($ret != 0)
{
@ -3370,19 +3372,21 @@ sub validate_slot_inactive_since
my ($self, $slot_name, $reference_time) = @_;
my $name = $self->name;
my $inactive_since = $self->safe_psql('postgres',
my $inactive_since = $self->safe_psql(
'postgres',
qq(SELECT inactive_since FROM pg_replication_slots
WHERE slot_name = '$slot_name' AND inactive_since IS NOT NULL;)
);
);
# Check that the inactive_since is sane
is($self->safe_psql('postgres',
qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
is( $self->safe_psql(
'postgres',
qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
'$inactive_since'::timestamptz > '$reference_time'::timestamptz;]
),
't',
"last inactive time for slot $slot_name is valid on node $name")
or die "could not validate captured inactive_since for slot $slot_name";
or die "could not validate captured inactive_since for slot $slot_name";
return $inactive_since;
}


@ -10,10 +10,12 @@ use strict;
use warnings FATAL => 'all';
use PostgreSQL::Test::Utils;
our ($krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit, $klist,
$kdb5_util, $kadmin_local, $krb5kdc,
$krb5_conf, $kdc_conf, $krb5_cache, $krb5_log, $kdc_log,
$kdc_port, $kdc_datadir, $kdc_pidfile, $keytab);
our (
$krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit,
$klist, $kdb5_util, $kadmin_local, $krb5kdc,
$krb5_conf, $kdc_conf, $krb5_cache, $krb5_log,
$kdc_log, $kdc_port, $kdc_datadir, $kdc_pidfile,
$keytab);
INIT
{
@ -178,7 +180,8 @@ $realm = {
key_stash_file = $kdc_datadir/_k5.$realm
}!);
mkdir $kdc_datadir or BAIL_OUT("could not create directory \"$kdc_datadir\"");
mkdir $kdc_datadir
or BAIL_OUT("could not create directory \"$kdc_datadir\"");
# Ensure that we use test's config and cache files, not global ones.
$ENV{'KRB5_CONFIG'} = $krb5_conf;
@ -189,7 +192,8 @@ $realm = {
system_or_bail $kdb5_util, 'create', '-s', '-P', 'secret0';
system_or_bail $kadmin_local, '-q', "addprinc -randkey $service_principal";
system_or_bail $kadmin_local, '-q',
"addprinc -randkey $service_principal";
system_or_bail $kadmin_local, '-q', "ktadd -k $keytab $service_principal";
system_or_bail $krb5kdc, '-P', $kdc_pidfile;
@ -226,7 +230,8 @@ END
# take care not to change the script's exit value
my $exit_code = $?;
kill 'INT', `cat $kdc_pidfile` if defined($kdc_pidfile) && -f $kdc_pidfile;
kill 'INT', `cat $kdc_pidfile`
if defined($kdc_pidfile) && -f $kdc_pidfile;
$? = $exit_code;
}


@ -99,9 +99,11 @@ is($result, qq(33|0|t), 'check streamed sequence content on standby 2');
$node_primary->safe_psql('postgres',
"CREATE UNLOGGED SEQUENCE ulseq; SELECT nextval('ulseq')");
$node_primary->wait_for_replay_catchup($node_standby_1);
is($node_standby_1->safe_psql('postgres',
"SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
't', 'pg_sequence_last_value() on unlogged sequence on standby 1');
is( $node_standby_1->safe_psql(
'postgres',
"SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
't',
'pg_sequence_last_value() on unlogged sequence on standby 1');
# Check that only READ-only queries can run on standbys
is($node_standby_1->psql('postgres', 'INSERT INTO tab_int VALUES (1)'),


@ -56,7 +56,8 @@ $bravo->safe_psql('postgres', 'checkpoint');
# beyond the previous vacuum.
$alpha->safe_psql('postgres', 'create table test2 (a int, b bytea)');
$alpha->safe_psql('postgres',
q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)});
q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)}
);
$alpha->safe_psql('postgres', 'truncate test2');
# Wait again for all records to be replayed.


@ -443,7 +443,7 @@ $primary4->safe_psql(
# Get inactive_since value after the slot's creation. Note that the slot is
# still inactive till it's used by the standby below.
my $inactive_since =
$primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);
$primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);
$standby4->start;
@ -502,7 +502,7 @@ $publisher4->safe_psql('postgres',
# Get inactive_since value after the slot's creation. Note that the slot is
# still inactive till it's used by the subscriber below.
$inactive_since =
$publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);
$publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);
$subscriber4->start;
$subscriber4->safe_psql('postgres',


@ -178,13 +178,15 @@ sub check_slots_conflict_reason
$res = $node_standby->safe_psql(
'postgres', qq(
select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;));
select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;)
);
is($res, "$reason", "$active_slot reason for conflict is $reason");
$res = $node_standby->safe_psql(
'postgres', qq(
select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;));
select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;)
);
is($res, "$reason", "$inactive_slot reason for conflict is $reason");
}
@ -559,7 +561,8 @@ check_slots_conflict_reason('vacuum_full_', 'rows_removed');
##################################################
# Get the restart_lsn from an invalidated slot
my $restart_lsn = $node_standby->safe_psql('postgres',
my $restart_lsn = $node_standby->safe_psql(
'postgres',
"SELECT restart_lsn FROM pg_replication_slots
WHERE slot_name = 'vacuum_full_activeslot' AND conflicting;"
);


@ -42,11 +42,15 @@ like(
qr/FATAL:\s+cannot connect to invalid database "regression_invalid"/,
"can't connect to invalid database - error message");
is($node->psql('postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
2, "can't ALTER invalid database");
is( $node->psql(
'postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
2,
"can't ALTER invalid database");
# check invalid database can't be used as a template
is( $node->psql('postgres', 'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
is( $node->psql(
'postgres',
'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
3,
"can't use invalid database as template");


@ -170,7 +170,8 @@ $standby1->start;
# Capture the inactive_since of the slot from the primary. Note that the slot
# will be inactive since the corresponding subscription was dropped.
my $inactive_since_on_primary =
$primary->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
$primary->validate_slot_inactive_since('lsub1_slot',
$slot_creation_time_on_primary);
# Wait for the standby to catch up so that the standby is not lagging behind
# the failover slots.
@ -190,7 +191,8 @@ is( $standby1->safe_psql(
# Capture the inactive_since of the synced slot on the standby
my $inactive_since_on_standby =
$standby1->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
$standby1->validate_slot_inactive_since('lsub1_slot',
$slot_creation_time_on_primary);
# Synced slot on the standby must get its own inactive_since
is( $standby1->safe_psql(
@ -264,7 +266,8 @@ $primary->safe_psql(
# Capture the inactive_since of the slot from the primary. Note that the slot
# will be inactive since the corresponding subscription was dropped.
$inactive_since_on_primary =
$primary->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
$primary->validate_slot_inactive_since('lsub1_slot',
$slot_creation_time_on_primary);
# Wait for the standby to catch up so that the standby is not lagging behind
# the failover slots.
@ -276,8 +279,8 @@ my $log_offset = -s $standby1->logfile;
$standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
# Confirm that the invalidated slot has been dropped.
$standby1->wait_for_log(qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/,
$log_offset);
$standby1->wait_for_log(
qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/, $log_offset);
# Confirm that the logical slot has been re-created on the standby and is
# flagged as 'synced'
@ -336,7 +339,8 @@ ok( $stderr =~
"cannot sync slots if dbname is not specified in primary_conninfo");
# Add the dbname back to the primary_conninfo for further tests
$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->append_conf('postgresql.conf',
"primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->reload;
##################################################
@ -427,19 +431,20 @@ $primary->wait_for_replay_catchup($standby1);
# synced slot. See the test where we promote standby (Promote the standby1 to
# primary.)
$primary->safe_psql('postgres',
"SELECT pg_logical_emit_message(false, 'test', 'test');"
);
"SELECT pg_logical_emit_message(false, 'test', 'test');");
# Get the confirmed_flush_lsn for the logical slot snap_test_slot on the primary
my $confirmed_flush_lsn = $primary->safe_psql('postgres',
"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';");
"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';"
);
$standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
# Verify that confirmed_flush_lsn of snap_test_slot slot is synced to the standby
ok( $standby1->poll_query_until(
'postgres',
"SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"),
"SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"
),
'confirmed_flush_lsn of slot snap_test_slot synced to standby');
##################################################
@ -479,22 +484,24 @@ GRANT USAGE on SCHEMA myschema TO repl_role;
});
# Start the standby with changed primary_conninfo.
$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
$standby1->append_conf('postgresql.conf',
"primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
$standby1->start;
# Run the synchronization function. If the sync flow was not prepared
# to handle such attacks, it would have failed during the validation
# of the primary_slot_name itself resulting in
# ERROR: slot synchronization requires valid primary_slot_name
$standby1->safe_psql('slotsync_test_db', "SELECT pg_sync_replication_slots();");
$standby1->safe_psql('slotsync_test_db',
"SELECT pg_sync_replication_slots();");
# Reset the dbname and user in primary_conninfo to the earlier values.
$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->append_conf('postgresql.conf',
"primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->reload;
# Drop the newly created database.
$primary->psql('postgres',
q{DROP DATABASE slotsync_test_db;});
$primary->psql('postgres', q{DROP DATABASE slotsync_test_db;});
##################################################
# Test to confirm that the slot sync worker exits on invalid GUC(s) and
@ -508,20 +515,21 @@ $standby1->append_conf('postgresql.conf', qq(sync_replication_slots = on));
$standby1->reload;
# Confirm that the slot sync worker is able to start.
$standby1->wait_for_log(qr/slot sync worker started/,
$log_offset);
$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);
$log_offset = -s $standby1->logfile;
# Disable another GUC required for slot sync.
$standby1->append_conf( 'postgresql.conf', qq(hot_standby_feedback = off));
$standby1->append_conf('postgresql.conf', qq(hot_standby_feedback = off));
$standby1->reload;
# Confirm that slot sync worker acknowledge the GUC change and logs the msg
# about wrong configuration.
$standby1->wait_for_log(qr/slot sync worker will restart because of a parameter change/,
$standby1->wait_for_log(
qr/slot sync worker will restart because of a parameter change/,
$log_offset);
$standby1->wait_for_log(qr/slot synchronization requires hot_standby_feedback to be enabled/,
$standby1->wait_for_log(
qr/slot synchronization requires hot_standby_feedback to be enabled/,
$log_offset);
$log_offset = -s $standby1->logfile;
@ -531,8 +539,7 @@ $standby1->append_conf('postgresql.conf', "hot_standby_feedback = on");
$standby1->reload;
# Confirm that the slot sync worker is able to start now.
$standby1->wait_for_log(qr/slot sync worker started/,
$log_offset);
$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);
##################################################
# Test to confirm that confirmed_flush_lsn of the logical slot on the primary
@ -557,7 +564,8 @@ $subscriber1->wait_for_subscription_sync;
# Do not allow any further advancement of the confirmed_flush_lsn for the
# lsub1_slot.
$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
$subscriber1->safe_psql('postgres',
"ALTER SUBSCRIPTION regress_mysub1 DISABLE");
# Wait for the replication slot to become inactive on the publisher
$primary->poll_query_until(
@ -567,12 +575,14 @@ $primary->poll_query_until(
# Get the confirmed_flush_lsn for the logical slot lsub1_slot on the primary
my $primary_flush_lsn = $primary->safe_psql('postgres',
"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';");
"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';"
);
# Confirm that confirmed_flush_lsn of lsub1_slot slot is synced to the standby
ok( $standby1->poll_query_until(
'postgres',
"SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"),
"SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"
),
'confirmed_flush_lsn of slot lsub1_slot synced to standby');
##################################################
@ -636,7 +646,8 @@ $subscriber2->safe_psql(
$subscriber2->wait_for_subscription_sync;
$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
$subscriber1->safe_psql('postgres',
"ALTER SUBSCRIPTION regress_mysub1 ENABLE");
my $offset = -s $primary->logfile;
@ -674,7 +685,8 @@ $primary->wait_for_log(
# primary and keeps waiting for the standby specified in standby_slot_names
# (sb1_slot aka standby1).
$result =
$subscriber1->safe_psql('postgres', "SELECT count(*) <> $primary_row_count FROM tab_int;");
$subscriber1->safe_psql('postgres',
"SELECT count(*) <> $primary_row_count FROM tab_int;");
is($result, 't',
"subscriber1 doesn't get data from primary until standby1 acknowledges changes"
);
@ -714,7 +726,8 @@ $standby1->stop;
# Disable the regress_mysub1 to prevent the logical walsender from generating
# more warnings.
$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
$subscriber1->safe_psql('postgres',
"ALTER SUBSCRIPTION regress_mysub1 DISABLE");
# Wait for the replication slot to become inactive on the publisher
$primary->poll_query_until(
@ -758,8 +771,7 @@ $primary->reload;
$back_q->quit;
$primary->safe_psql('postgres',
"SELECT pg_drop_replication_slot('test_slot');"
);
"SELECT pg_drop_replication_slot('test_slot');");
# Add the physical slot (sb1_slot) back to the standby_slot_names for further
# tests.
@ -767,7 +779,8 @@ $primary->adjust_conf('postgresql.conf', 'standby_slot_names', "'sb1_slot'");
$primary->reload;
# Enable the regress_mysub1 for further tests
$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
$subscriber1->safe_psql('postgres',
"ALTER SUBSCRIPTION regress_mysub1 ENABLE");
##################################################
# Test that logical replication will wait for the user-created inactive
@ -835,14 +848,16 @@ $standby1->promote;
# promotion. We do this check before the slot is enabled on the new primary
# below, otherwise, the slot gets active setting inactive_since to NULL.
my $inactive_since_on_new_primary =
$standby1->validate_slot_inactive_since('lsub1_slot', $promotion_time_on_primary);
$standby1->validate_slot_inactive_since('lsub1_slot',
$promotion_time_on_primary);
is( $standby1->safe_psql(
'postgres',
"SELECT '$inactive_since_on_new_primary'::timestamptz > '$inactive_since_on_primary'::timestamptz"
),
"t",
'synchronized slot has got its own inactive_since on the new primary after promotion');
'synchronized slot has got its own inactive_since on the new primary after promotion'
);
# Update subscription with the new primary's connection info
my $standby1_conninfo = $standby1->connstr . ' dbname=postgres';
@ -850,8 +865,10 @@ $subscriber1->safe_psql('postgres',
"ALTER SUBSCRIPTION regress_mysub1 CONNECTION '$standby1_conninfo';");
# Confirm the synced slot 'lsub1_slot' is retained on the new primary
is($standby1->safe_psql('postgres',
q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}),
is( $standby1->safe_psql(
'postgres',
q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}
),
't',
'synced slot retained on the new primary');
@ -861,9 +878,8 @@ $standby1->safe_psql('postgres',
$standby1->wait_for_catchup('regress_mysub1');
# Confirm that data in tab_int replicated on the subscriber
is( $subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
"20",
'data replicated from the new primary');
is($subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
"20", 'data replicated from the new primary');
# Consume the data from the snap_test_slot. The synced slot should reach a
# consistent point by restoring the snapshot at the restart_lsn serialized


@ -86,7 +86,8 @@ switch_server_cert(
restart => 'no');
$result = $node->restart(fail_ok => 1);
is($result, 0, 'restart fails with password-protected key file with wrong password');
is($result, 0,
'restart fails with password-protected key file with wrong password');
switch_server_cert(
$node,


@ -48,7 +48,8 @@ is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# Update the rows on the publisher and check the additional columns on
# subscriber didn't change
$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");
$node_publisher->safe_psql('postgres',
"UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");
$node_publisher->wait_for_catchup('tap_sub');


@ -32,7 +32,8 @@ $node_publisher->safe_psql('postgres',
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)");
"CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)"
);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';


@ -288,8 +288,7 @@ is( $node_subscriber->safe_psql(
# Since disabling subscription doesn't wait for walsender to release the replication
# slot and exit, wait for the slot to become inactive.
$node_publisher->poll_query_until(
$db,
$node_publisher->poll_query_until($db,
qq(SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '$sub2_name' AND active_pid IS NULL))
) or die "slot never became inactive";


@ -166,7 +166,8 @@ BEGIN;
INSERT INTO tbl SELECT i, sha256(i::text::bytea) FROM generate_series(1, 10000) s(i);
COMMIT;
]);
test_skip_lsn($node_publisher, $node_subscriber, "(4, sha256(4::text::bytea))",
test_skip_lsn($node_publisher, $node_subscriber,
"(4, sha256(4::text::bytea))",
"4", "test skipping stream-commit");
$result = $node_subscriber->safe_psql('postgres',


@ -490,7 +490,8 @@ $node_publisher->safe_psql('postgres',
$node_subscriber->safe_psql('postgres',
"CREATE TABLE test_replica_id_full (x int, y text)");
$node_subscriber->safe_psql('postgres',
"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)");
"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)"
);
# insert some initial data
$node_publisher->safe_psql('postgres',


@ -207,10 +207,7 @@ GRANT regress_alice TO regress_admin WITH INHERIT FALSE, SET TRUE;
# the above grant doesn't help.
publish_insert("alice.unpartitioned", 14);
expect_failure(
"alice.unpartitioned",
3,
7,
13,
"alice.unpartitioned", 3, 7, 13,
qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
"with no privileges cannot replicate");


@ -469,23 +469,22 @@ $node_subscriber->safe_psql(
));
$node_subscriber->wait_for_subscription_sync($node_publisher, 'sub1');
$result = $node_subscriber->safe_psql('postgres',
"SELECT a, b FROM tab_default");
is($result, qq(1|f
$result =
$node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
is( $result, qq(1|f
2|t), 'check snapshot on subscriber');
# Update all rows in the table and ensure the rows with the missing `b`
# attribute replicate correctly.
$node_publisher->safe_psql('postgres',
"UPDATE tab_default SET a = a + 1");
$node_publisher->safe_psql('postgres', "UPDATE tab_default SET a = a + 1");
$node_publisher->wait_for_catchup('sub1');
# When the bug is present, the `1|f` row will not be updated to `2|f` because
# the publisher incorrectly fills in `NULL` for `b` and publishes an update
# for `1|NULL`, which doesn't exist in the subscriber.
$result = $node_subscriber->safe_psql('postgres',
"SELECT a, b FROM tab_default");
is($result, qq(2|f
$result =
$node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
is( $result, qq(2|f
3|t), 'check replicated update on subscriber');
$node_publisher->stop('fast');


@ -231,7 +231,6 @@ BUF_MEM
BYTE
BY_HANDLE_FILE_INFORMATION
Backend
BackendId
BackendParameters
BackendStartupData
BackendState
@ -245,7 +244,6 @@ Barrier
BaseBackupCmd
BaseBackupTargetHandle
BaseBackupTargetType
BasicArchiveData
BeginDirectModify_function
BeginForeignInsert_function
BeginForeignModify_function
@ -275,10 +273,19 @@ BlockId
BlockIdData
BlockInfoRecord
BlockNumber
BlockRefTable
BlockRefTableBuffer
BlockRefTableChunk
BlockRefTableEntry
BlockRefTableKey
BlockRefTableReader
BlockRefTableSerializedEntry
BlockRefTableWriter
BlockSampler
BlockSamplerData
BlockedProcData
BlockedProcsData
BlocktableEntry
BloomBuildState
BloomFilter
BloomMetaPageData
@ -367,6 +374,7 @@ CallStmt
CancelRequestPacket
Cardinality
CaseExpr
CaseKind
CaseTestExpr
CaseWhen
Cash
@ -483,8 +491,8 @@ CopyFromState
CopyFromStateData
CopyHeaderChoice
CopyInsertMethod
CopyMethod
CopyLogVerbosityChoice
CopyMethod
CopyMultiInsertBuffer
CopyMultiInsertInfo
CopyOnErrorChoice
@ -560,10 +568,14 @@ DR_intorel
DR_printtup
DR_sqlfunction
DR_transientrel
DSMRegistryCtxStruct
DSMRegistryEntry
DWORD
DataDirSyncMethod
DataDumperPtr
DataPageDeleteStack
DataTypesUsageChecks
DataTypesUsageVersionCheck
DatabaseInfo
DateADT
DateTimeErrorExtra
@ -621,8 +633,6 @@ DropSubscriptionStmt
DropTableSpaceStmt
DropUserMappingStmt
DropdbStmt
DSMRegistryCtxStruct
DSMRegistryEntry
DumpComponents
DumpId
DumpOptions
@ -758,6 +768,7 @@ FetchStmt
FieldSelect
FieldStore
File
FileBackupMethod
FileFdwExecutionState
FileFdwPlanState
FileNameMap
@ -1165,9 +1176,11 @@ ImportForeignSchemaType
ImportForeignSchema_function
ImportQual
InProgressEnt
InProgressIO
IncludeWal
InclusionOpaque
IncrementVarSublevelsUp_context
IncrementalBackupInfo
IncrementalSort
IncrementalSortExecutionStatus
IncrementalSortGroupInfo
@ -1223,7 +1236,6 @@ InjectionPointConditionType
InjectionPointEntry
InjectionPointSharedState
InlineCodeBlock
InProgressIO
InsertStmt
Instrumentation
Int128AggState
@ -1249,6 +1261,7 @@ IsForeignScanParallelSafe_function
IsoConnInfo
IspellDict
Item
ItemArray
ItemId
ItemIdData
ItemPointer
@ -1272,7 +1285,6 @@ Join
JoinCostWorkspace
JoinDomain
JoinExpr
JsonFuncExpr
JoinHashEntry
JoinPath
JoinPathExtraData
@ -1299,6 +1311,7 @@ JsonExprOp
JsonExprState
JsonFormat
JsonFormatType
JsonFuncExpr
JsonHashEntry
JsonIncrementalState
JsonIsPredicate
@ -1315,15 +1328,16 @@ JsonManifestWALRangeField
JsonObjectAgg
JsonObjectConstructor
JsonOutput
JsonParseExpr
JsonParseContext
JsonParseErrorType
JsonParseExpr
JsonParserStack
JsonPath
JsonPathBool
JsonPathDatatypeStatus
JsonPathCountVarsCallback
JsonPathExecContext
JsonPathExecResult
JsonPathGetVarCallback
JsonPathGinAddPathItemFunc
JsonPathGinContext
JsonPathGinExtractNodesFunc
@ -1334,7 +1348,6 @@ JsonPathGinPathItem
JsonPathItem
JsonPathItemType
JsonPathKeyword
JsonPathMutableContext
JsonPathParseItem
JsonPathParseResult
JsonPathPredicateCallback
@ -1398,6 +1411,7 @@ LINE
LLVMAttributeRef
LLVMBasicBlockRef
LLVMBuilderRef
LLVMContextRef
LLVMErrorRef
LLVMIntPredicate
LLVMJITEventListenerRef
@ -1976,7 +1990,6 @@ ParallelHashJoinBatch
ParallelHashJoinBatchAccessor
ParallelHashJoinState
ParallelIndexScanDesc
ParallelReadyList
ParallelSlot
ParallelSlotArray
ParallelSlotResultHandler
@ -2053,6 +2066,7 @@ PathClauseUsage
PathCostComparison
PathHashStack
PathKey
PathKeyInfo
PathKeysComparison
PathTarget
PatternInfo
@ -2175,7 +2189,6 @@ PortalStrategy
PostParseColumnRefHook
PostgresPollingStatusType
PostingItem
PostmasterChildType
PreParseColumnRefHook
PredClass
PredIterInfo
@ -2199,6 +2212,7 @@ PrivTarget
PrivateRefCountEntry
ProcArrayStruct
ProcLangInfo
ProcNumber
ProcSignalBarrierType
ProcSignalHeader
ProcSignalReason
@ -2217,8 +2231,8 @@ ProjectionPath
PromptInterruptContext
ProtocolVersion
PrsStorage
PruneReason
PruneFreezeResult
PruneReason
PruneState
PruneStepResult
PsqlScanCallbacks
@ -2320,6 +2334,7 @@ ReadFunc
ReadLocalXLogPageNoWaitPrivate
ReadReplicationSlotCmd
ReadStream
ReadStreamBlockNumberCB
ReassignOwnedStmt
RecheckForeignScan_function
RecordCacheArrayEntry
@ -2433,6 +2448,7 @@ ResourceOwnerDesc
ResourceReleaseCallback
ResourceReleaseCallbackItem
ResourceReleasePhase
ResourceReleasePriority
RestoreOptions
RestorePass
RestrictInfo
@ -2696,8 +2712,8 @@ SpecialJoinInfo
SpinDelayStatus
SplitInterval
SplitLR
SplitPartitionContext
SplitPageLayout
SplitPartitionContext
SplitPoint
SplitTextOutputData
SplitVar
@ -2753,6 +2769,7 @@ SubscriptingRefState
Subscription
SubscriptionInfo
SubscriptionRelState
SummarizerReadLocalXLogPrivate
SupportRequestCost
SupportRequestIndexCondition
SupportRequestOptimizeWindowClause
@ -2761,15 +2778,16 @@ SupportRequestSelectivity
SupportRequestSimplify
SupportRequestWFuncMonotonic
Syn
SyncingTablesState
SyncOps
SyncRepConfigData
SyncRepStandbyData
SyncRequestHandler
SyncRequestType
SyncingTablesState
SysFKRelationship
SysScanDesc
SyscacheCallbackFunction
SysloggerStartupData
SystemRowsSamplerData
SystemSamplerData
SystemTimeSamplerData
@ -2868,6 +2886,7 @@ TestDSMRegistryStruct
TestDecodingData
TestDecodingTxnData
TestSpec
TestValueType
TextFreq
TextPositionState
TheLexeme
@ -2882,6 +2901,9 @@ TidRangeScan
TidRangeScanState
TidScan
TidScanState
TidStore
TidStoreIter
TidStoreIterResult
TimeADT
TimeLineHistoryCmd
TimeLineHistoryEntry
@ -2904,7 +2926,6 @@ TocEntry
TokenAuxData
TokenizedAuthLine
TrackItem
TransamVariablesData
TransApplyAction
TransInvalidationInfo
TransState
@ -2913,6 +2934,7 @@ TransactionState
TransactionStateData
TransactionStmt
TransactionStmtKind
TransamVariablesData
TransformInfo
TransformJsonStringValuesState
TransitionCaptureState
@ -2956,7 +2978,6 @@ TupleTableSlotOps
TuplesortClusterArg
TuplesortDatumArg
TuplesortIndexArg
TuplesortIndexBrinArg
TuplesortIndexBTreeArg
TuplesortIndexHashArg
TuplesortInstrumentation
@ -3009,6 +3030,7 @@ UnresolvedTup
UnresolvedTupData
UpdateContext
UpdateStmt
UploadManifestCmd
UpperRelationKind
UpperUniquePath
UserAuth
@ -3057,7 +3079,6 @@ VolatileFunctionStatus
Vsrt
WAIT_ORDER
WALAvailability
WalInsertClass
WALInsertLock
WALInsertLockPadded
WALOpenSegment
@ -3090,6 +3111,7 @@ WaitEventTimeout
WaitPMResult
WalCloseMethod
WalCompression
WalInsertClass
WalLevel
WalRcvData
WalRcvExecResult
@ -3103,6 +3125,9 @@ WalSnd
WalSndCtlData
WalSndSendDataCallback
WalSndState
WalSummarizerData
WalSummaryFile
WalSummaryIO
WalSyncMethod
WalTimeSample
WalUsage
@ -3127,6 +3152,7 @@ WindowStatePerAggData
WindowStatePerFunc
WithCheckOption
WithClause
WordBoundaryNext
WordEntry
WordEntryIN
WordEntryPos
@ -3217,12 +3243,15 @@ ZstdCompressorState
_SPI_connection
_SPI_plan
__m128i
__m512i
__mmask64
__time64_t
_dev_t
_ino_t
_locale_t
_resultmap
_stringlist
access_vector_t
acquireLocksOnSubLinks_context
add_nulling_relids_context
adjust_appendrel_attrs_context
@ -3241,6 +3270,7 @@ amgetbitmap_function
amgettuple_function
aminitparallelscan_function
aminsert_function
aminsertcleanup_function
ammarkpos_function
amoptions_function
amparallelrescan_function
@ -3255,13 +3285,17 @@ assign_collations_context
auth_password_hook_typ
autovac_table
av_relation
avc_cache
avl_dbase
avl_node
avl_tree
avw_dbase
backslashResult
backup_file_entry
backup_file_hash
backup_manifest_info
backup_manifest_option
backup_wal_range
base_yy_extra_type
basebackup_options
bbsink
@ -3295,6 +3329,8 @@ bitmapword
bits16
bits32
bits8
blockreftable_hash
blockreftable_iterator
bloom_filter
boolKEY
brin_column_state
@ -3304,6 +3340,10 @@ cached_re_str
canonicalize_state
cashKEY
catalogid_hash
cb_cleanup_dir
cb_options
cb_tablespace
cb_tablespace_mapping
check_agg_arguments_context
check_function_callback
check_network_data
@ -3370,6 +3410,7 @@ dsa_segment_header
dsa_segment_index
dsa_segment_map
dshash_compare_function
dshash_copy_function
dshash_hash
dshash_hash_function
dshash_parameters
@ -3395,6 +3436,7 @@ emit_log_hook_type
eval_const_expressions_context
exec_thread_arg
execution_state
exit_function
explain_get_index_name_hook_type
f_smgr
fasthash_state
@ -3493,6 +3535,7 @@ indexed_tlist
inet
inetKEY
inet_struct
initRowMethod
init_function
inline_cte_walker_context
inline_error_callback_arg
@ -3508,12 +3551,14 @@ int32_t
int64
int64KEY
int8
int8x16_t
internalPQconninfoOption
intptr_t
intset_internal_node
intset_leaf_node
intset_node
intvKEY
io_callback_fn
io_stat_col
itemIdCompact
itemIdCompactData
@ -3524,6 +3569,8 @@ json_aelem_action
json_manifest_error_callback
json_manifest_per_file_callback
json_manifest_per_wal_range_callback
json_manifest_system_identifier_callback
json_manifest_version_callback
json_ofield_action
json_scalar_action
json_struct_action
@ -3540,6 +3587,8 @@ list_sort_comparator
local_relopt
local_relopts
local_source
local_ts_iter
local_ts_radix_tree
locale_t
locate_agg_of_level_context
locate_var_of_level_context
@ -3558,10 +3607,12 @@ macKEY
macaddr
macaddr8
macaddr_sortsupport_state
manifest_data
manifest_file
manifest_files_hash
manifest_files_iterator
manifest_wal_range
manifest_writer
map_variable_attnos_context
max_parallel_hazard_context
mb2wchar_with_len_converter
@ -3608,14 +3659,16 @@ pairingheap_node
pam_handle_t
parallel_worker_main_type
parse_error_callback_arg
parser_context
partition_method_t
pendingPosition
pending_label
pgParameterStatus
pg_atomic_flag
pg_atomic_uint32
pg_atomic_uint64
pg_be_sasl_mech
pg_case_map
pg_category_range
pg_checksum_context
pg_checksum_raw_context
pg_checksum_type
@ -3659,10 +3712,13 @@ pg_time_usec_t
pg_tz
pg_tz_cache
pg_tzenum
pg_unicode_category
pg_unicode_decompinfo
pg_unicode_decomposition
pg_unicode_norminfo
pg_unicode_normprops
pg_unicode_properties
pg_unicode_range
pg_unicode_recompinfo
pg_utf_to_local_combined
pg_uuid_t
@ -3788,24 +3844,32 @@ remove_nulling_relids_context
rendezvousHashEntry
replace_rte_variables_callback
replace_rte_variables_context
report_error_fn
ret_type
rewind_source
rewrite_event
rf_context
rfile
rm_detail_t
rt_node_class_test_elem
role_auth_extra
rolename_hash
row_security_policy_hook_type
rsv_callback
rt_iter
rt_node_class_test_elem
rt_radix_tree
saophash_hash
save_buffer
scram_state
scram_state_enum
security_class_t
sem_t
sepgsql_context_info_t
sequence_magic
set_join_pathlist_hook_type
set_rel_pathlist_hook_type
shared_ts_iter
shared_ts_radix_tree
shm_mq
shm_mq_handle
shm_mq_iovec
@ -3871,6 +3935,7 @@ substitute_actual_srf_parameters_context
substitute_phv_relids_context
symbol
tablespaceinfo
td_entry
teSection
temp_tablespaces_extra
test_re_flags
@ -3912,6 +3977,7 @@ uid_t
uint128
uint16
uint16_t
uint16x8_t
uint32
uint32_t
uint32x4_t
@ -3951,6 +4017,7 @@ walrcv_endstreaming_fn
walrcv_exec_fn
walrcv_get_backend_pid_fn
walrcv_get_conninfo_fn
walrcv_get_dbname_from_conninfo_fn
walrcv_get_senderinfo_fn
walrcv_identify_system_fn
walrcv_readtimelinehistoryfile_fn
@ -3962,10 +4029,11 @@ wchar2mb_with_len_converter
wchar_t
win32_deadchild_waitinfo
wint_t
worker_spi_state
worker_state
worktable
wrap
ws_file_info
ws_options
xl_brin_createidx
xl_brin_desummarize
xl_brin_insert
@ -4059,6 +4127,7 @@ xmlBuffer
xmlBufferPtr
xmlChar
xmlDocPtr
xmlError
xmlErrorPtr
xmlExternalEntityLoader
xmlGenericErrorFunc
@ -4085,35 +4154,3 @@ yyscan_t
z_stream
z_streamp
zic_t
BlockRefTable
BlockRefTableBuffer
BlockRefTableEntry
BlockRefTableKey
BlockRefTableReader
BlockRefTableSerializedEntry
BlockRefTableWriter
SummarizerReadLocalXLogPrivate
SysloggerStartupData
WalSummarizerData
WalSummaryFile
WalSummaryIO
FileBackupMethod
IncrementalBackupInfo
UploadManifestCmd
backup_file_entry
backup_wal_range
cb_cleanup_dir
cb_options
cb_tablespace
cb_tablespace_mapping
manifest_data
manifest_writer
rfile
ws_options
ws_file_info
PathKeyInfo
TidStore
TidStoreIter
TidStoreIterResult
BlocktableEntry
ItemArray