Add PostgreSQL::Test::Cluster::advance_wal

This adds a function that makes a node jump ahead by N WAL segments,
something a couple of tests have been relying on for cases related to
streaming, replication slot limits, and logical decoding on standbys.
Centralizing the logic also makes it cheaper, by relying on
pg_logical_emit_message() to emit WAL records before switching to a new
segment.

Author: Bharath Rupireddy
Reviewed-by: Kyotaro Horiguchi, Euler Taveira
Discussion: https://postgr.es/m/CALj2ACU3R8QFCvDewHCMKjgb2w_-CMCyd6DAK=Jb-af14da5eg@mail.gmail.com
Committed by Michael Paquier on 2023-12-21 10:19:17 +09:00
commit c161ab74f7 (parent bf6260b39d)
4 changed files with 42 additions and 45 deletions
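
For illustration (paraphrasing the diffs below, not part of the commit message itself): the tests used to advance WAL with ad-hoc SQL and now call the new helper on the node object.

# Before: each test carried some variation of this pattern.
$node_primary->safe_psql('postgres',
	"CREATE TABLE t (); DROP TABLE t; SELECT pg_switch_wal();");

# After: the logic lives in PostgreSQL::Test::Cluster.
$node_primary->advance_wal(1);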


@@ -3197,6 +3197,31 @@ sub create_logical_slot_on_standby
=pod

=item $node->advance_wal(num)

Advance WAL of node by given number of segments.

=cut

sub advance_wal
{
	my ($self, $num) = @_;

	# Advance by $num segments (= (wal_segment_size * $num) bytes).
	# pg_switch_wal() forces a WAL flush, making pg_logical_emit_message()
	# safe to use in non-transactional mode.
	for (my $i = 0; $i < $num; $i++)
	{
		$self->safe_psql(
			'postgres', qq{
			SELECT pg_logical_emit_message(false, '', 'foo');
			SELECT pg_switch_wal();
			});
	}
}

=pod

=back

=cut
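
As a hypothetical sanity check (not part of this commit; it assumes a running $node and Test::More), one could verify that the helper really switches to a new WAL segment by comparing WAL file names:

# The current WAL file name should change after advancing.
my $before = $node->safe_psql('postgres',
	'SELECT pg_walfile_name(pg_current_wal_lsn())');
$node->advance_wal(2);
my $after = $node->safe_psql('postgres',
	'SELECT pg_walfile_name(pg_current_wal_lsn())');
isnt($before, $after, 'advance_wal moved to a new WAL segment');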


@@ -522,11 +522,7 @@ $node_primary->safe_psql('postgres',
my $segment_removed = $node_primary->safe_psql('postgres',
'SELECT pg_walfile_name(pg_current_wal_lsn())');
chomp($segment_removed);
$node_primary->psql(
'postgres', "
CREATE TABLE tab_phys_slot (a int);
INSERT INTO tab_phys_slot VALUES (generate_series(1,10));
SELECT pg_switch_wal();");
$node_primary->advance_wal(1);
my $current_lsn =
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);


@@ -59,7 +59,7 @@ $result = $node_primary->safe_psql('postgres',
is($result, "reserved|t", 'check the catching-up state');
# Advance WAL by five segments (= 5MB) on primary
advance_wal($node_primary, 1);
$node_primary->advance_wal(1);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when fitting max_wal_size
@@ -69,7 +69,7 @@ $result = $node_primary->safe_psql('postgres',
is($result, "reserved|t",
'check that it is safe if WAL fits in max_wal_size');
advance_wal($node_primary, 4);
$node_primary->advance_wal(4);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when max_slot_wal_keep_size is not set
@@ -100,7 +100,7 @@ $result = $node_primary->safe_psql('postgres',
is($result, "reserved", 'check that max_slot_wal_keep_size is working');
# Advance WAL again then checkpoint, reducing remain by 2 MB.
advance_wal($node_primary, 2);
$node_primary->advance_wal(2);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is still working
@@ -118,7 +118,7 @@ $node_standby->stop;
$result = $node_primary->safe_psql('postgres',
"ALTER SYSTEM SET wal_keep_size to '8MB'; SELECT pg_reload_conf();");
# Advance WAL again then checkpoint, reducing remain by 6 MB.
advance_wal($node_primary, 6);
$node_primary->advance_wal(6);
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
@@ -134,7 +134,7 @@ $node_primary->wait_for_catchup($node_standby);
$node_standby->stop;
# Advance WAL again without checkpoint, reducing remain by 6 MB.
advance_wal($node_primary, 6);
$node_primary->advance_wal(6);
# Slot gets into 'reserved' state
$result = $node_primary->safe_psql('postgres',
@@ -145,7 +145,7 @@ is($result, "extended", 'check that the slot state changes to "extended"');
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# Advance WAL again without checkpoint; remain goes to 0.
advance_wal($node_primary, 1);
$node_primary->advance_wal(1);
# Slot gets into 'unreserved' state and safe_wal_size is negative
$result = $node_primary->safe_psql('postgres',
@@ -174,7 +174,7 @@ $node_primary->safe_psql('postgres',
# Advance WAL again. The slot loses the oldest segment by the next checkpoint
my $logstart = -s $node_primary->logfile;
advance_wal($node_primary, 7);
$node_primary->advance_wal(7);
# Now create another checkpoint and wait until the WARNING is issued
$node_primary->safe_psql('postgres',
@@ -275,18 +275,12 @@ $node_standby->init_from_backup($node_primary2, $backup_name,
has_streaming => 1);
$node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'");
$node_standby->start;
my @result =
  split(
	'\n',
	$node_primary2->safe_psql(
		'postgres',
		"CREATE TABLE tt();
DROP TABLE tt;
SELECT pg_switch_wal();
CHECKPOINT;
SELECT 'finished';",
		timeout => $PostgreSQL::Test::Utils::timeout_default));
is($result[1], 'finished', 'check if checkpoint command is not blocked');
$node_primary2->advance_wal(1);
$result = $node_primary2->safe_psql(
	'postgres',
	"CHECKPOINT; SELECT 'finished';",
	timeout => $PostgreSQL::Test::Utils::timeout_default);
is($result, 'finished', 'check if checkpoint command is not blocked');
$node_primary2->stop;
$node_standby->stop;
@@ -372,7 +366,7 @@ $logstart = -s $node_primary3->logfile;
# freeze walsender and walreceiver. Slot will still be active, but walreceiver
# won't get anything anymore.
kill 'STOP', $senderpid, $receiverpid;
advance_wal($node_primary3, 2);
$node_primary3->advance_wal(2);
my $msg_logged = 0;
my $max_attempts = $PostgreSQL::Test::Utils::timeout_default;
@@ -418,19 +412,4 @@ kill 'CONT', $receiverpid;
$node_primary3->stop;
$node_standby3->stop;
#####################################
# Advance WAL of $node by $n segments
sub advance_wal
{
	my ($node, $n) = @_;

	# Advance by $n segments (= (wal_segment_size * $n) bytes) on primary.
	for (my $i = 0; $i < $n; $i++)
	{
		$node->safe_psql('postgres',
			"CREATE TABLE t (); DROP TABLE t; SELECT pg_switch_wal();");
	}
	return;
}
done_testing();
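
For context, each advance_wal() call in this test is followed by a look at the slot's state; the query involved is roughly the following sketch (not code added by this commit):

# Observe how far the slot has fallen behind after advancing WAL.
my $state = $node_primary->safe_psql('postgres',
	"SELECT wal_status, safe_wal_size FROM pg_replication_slots
	 WHERE slot_name = 'rep1'");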


@@ -524,11 +524,8 @@ my $walfile_name = $node_primary->safe_psql('postgres',
chomp($walfile_name);
# Generate some activity and switch WAL file on the primary
$node_primary->safe_psql(
'postgres', "create table retain_test(a int);
select pg_switch_wal();
insert into retain_test values(1);
checkpoint;");
$node_primary->advance_wal(1);
$node_primary->safe_psql('postgres', "checkpoint;");
# Wait for the standby to catch up
$node_primary->wait_for_replay_catchup($node_standby);
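
Taken together, the converted call sites settle on a common shape, roughly (illustrative only, using the node names from the tests above):

$node_primary->advance_wal(1);    # switch to a new WAL segment cheaply
$node_primary->safe_psql('postgres', 'CHECKPOINT;');    # let old segments be removed or recycled
$node_primary->wait_for_replay_catchup($node_standby);    # wait until the standby has replayed it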