Post-PG 10 beta1 pgperltidy run

Bruce Momjian 2017-05-17 19:01:23 -04:00
parent a6fd7b7a5f
commit ce55481032
66 changed files with 1869 additions and 1184 deletions
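
pgperltidy is the PostgreSQL wrapper around Perl::Tidy that reformats every Perl file in the tree with the project's perltidyrc profile, so the hunks below are layout-only changes. As a rough, hedged sketch of what such a pass amounts to (the glob patterns and the perltidyrc path are illustrative assumptions, not taken from this commit):

# Minimal sketch of a tree-wide tidy pass using the Perl::Tidy module.
# The file globs and profile path are assumptions for illustration only.
use strict;
use warnings;
use Perl::Tidy;

foreach my $file (glob('src/tools/msvc/*.pm src/backend/catalog/*.pl'))
{
    # Rewrite each file according to the shared perltidy profile.
    Perl::Tidy::perltidy(
        source      => $file,
        destination => "$file.tdy",
        perltidyrc  => 'src/tools/pgindent/perltidyrc');
    rename("$file.tdy", $file) or die "could not rename $file.tdy: $!";
}

Applied tree-wide, a pass like this produces exactly the kind of line-wrapping and brace-placement churn visible in the hunks that follow.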


@@ -92,7 +92,8 @@ if ($opt{v})
if ($opt{e})
{
my @plan =
map { "$_->[0]\n" } @{ $dbi->selectall_arrayref("explain $sql") };
print @plan;
}


@@ -80,10 +80,11 @@ sub Catalogs
{
$catalog{natts} = $1;
}
elsif (
/^DATA\(insert(\s+OID\s+=\s+(\d+))?\s+\(\s*(.*)\s*\)\s*\)$/)
{
check_natts($filename, $catalog{natts}, $3, $input_file,
$input_line_number);
push @{ $catalog{data} }, { oid => $2, bki_values => $3 };
}
@@ -256,7 +257,8 @@ sub check_natts
{
my ($catname, $natts, $bki_val, $file, $line) = @_;
die
"Could not find definition for Natts_${catname} before start of DATA() in $file\n"
unless defined $natts;
my $nfields = scalar(SplitDataLine($bki_val));


@@ -163,11 +163,13 @@ foreach my $catname (@{ $catalogs->{names} })
# Split line into tokens without interpreting their meaning.
my %bki_values;
@bki_values{@attnames} =
Catalog::SplitDataLine($row->{bki_values});
# Perform required substitutions on fields
foreach my $att (keys %bki_values)
{
# Substitute constant values we acquired above.
# (It's intentional that this can apply to parts of a field).
$bki_values{$att} =~ s/\bPGUID\b/$BOOTSTRAP_SUPERUSERID/g;
@@ -459,7 +461,8 @@ sub bki_insert
my $row = shift;
my @attnames = @_;
my $oid = $row->{oid} ? "OID = $row->{oid} " : '';
my $bki_values = join ' ', map { $_ eq '' ? '""' : $_ } map $row->{$_},
@attnames;
printf $bki "insert %s( %s )\n", $oid, $bki_values;
}


@@ -149,7 +149,8 @@ while (my ($kcat, $kcat_id) = each(%keyword_categories))
# Now read in kwlist.h
open(my $kwlist, '<', $kwlist_filename)
|| die("Could not open : $kwlist_filename");
my $prevkwstring = '';
my $bare_kwname;


@@ -58,6 +58,7 @@ foreach my $column (@{ $catalogs->{pg_proc}->{columns} })
my $data = $catalogs->{pg_proc}->{data};
foreach my $row (@$data)
{
# Split line into tokens without interpreting their meaning.
my %bki_values;
@bki_values{@attnames} = Catalog::SplitDataLine($row->{bki_values});
@@ -80,9 +81,12 @@ my $oidsfile = $output_path . 'fmgroids.h';
my $protosfile = $output_path . 'fmgrprotos.h';
my $tabfile = $output_path . 'fmgrtab.c';
open my $ofh, '>', $oidsfile . $tmpext
or die "Could not open $oidsfile$tmpext: $!";
open my $pfh, '>', $protosfile . $tmpext
or die "Could not open $protosfile$tmpext: $!";
open my $tfh, '>', $tabfile . $tmpext
or die "Could not open $tabfile$tmpext: $!";
print $ofh
qq|/*-------------------------------------------------------------------------


@@ -35,7 +35,8 @@ my $all = &read_source("BIG5.TXT");
# Load CP950.TXT
my $cp950txt = &read_source("CP950.TXT");
foreach my $i (@$cp950txt)
{
my $code = $i->{code};
my $ucs = $i->{ucs};
@@ -46,7 +47,8 @@ foreach my $i (@$cp950txt) {
&& $code >= 0xf9d6
&& $code <= 0xf9dc)
{
push @$all,
{ code => $code,
ucs => $ucs,
comment => $i->{comment},
direction => BOTH,
@@ -55,7 +57,8 @@ foreach my $i (@$cp950txt) {
}
}
foreach my $i (@$all)
{
my $code = $i->{code};
my $ucs = $i->{ucs};


@@ -38,8 +38,10 @@ while (<$in>)
# a lot of extra characters on top of the GB2312 character set that
# EUC_CN encodes. Filter out those extra characters.
next if (($code & 0xFF) < 0xA1);
next
if (
!( $code >= 0xA100 && $code <= 0xA9FF
|| $code >= 0xB000 && $code <= 0xF7FF));
next if ($code >= 0xA2A1 && $code <= 0xA2B0);
next if ($code >= 0xA2E3 && $code <= 0xA2E4);
@@ -67,13 +69,12 @@ while (<$in>)
$ucs = 0x2015;
}
push @mapping,
{ ucs => $ucs,
code => $code,
direction => BOTH,
f => $in_file,
l => $. };
}
close($in);


@@ -24,6 +24,7 @@ while (my $line = <$in>)
{
if ($line =~ /^0x(.*)[ \t]*U\+(.*)\+(.*)[ \t]*#(.*)$/)
{
# combined characters
my ($c, $u1, $u2) = ($1, $2, $3);
my $rest = "U+" . $u1 . "+" . $u2 . $4;
@@ -31,17 +32,18 @@ while (my $line = <$in>)
my $ucs1 = hex($u1);
my $ucs2 = hex($u2);
push @all,
{ direction => BOTH,
ucs => $ucs1,
ucs_second => $ucs2,
code => $code,
comment => $rest,
f => $in_file,
l => $. };
}
elsif ($line =~ /^0x(.*)[ \t]*U\+(.*)[ \t]*#(.*)$/)
{
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
my $ucs = hex($u);
@@ -49,13 +51,13 @@ while (my $line = <$in>)
next if ($code < 0x80 && $ucs < 0x80);
push @all,
{ direction => BOTH,
ucs => $ucs,
code => $code,
comment => $rest,
f => $in_file,
l => $. };
}
}
close($in);


@@ -21,7 +21,9 @@ my $jis0212 = &read_source("JIS0212.TXT");
my @mapping;
foreach my $i (@$jis0212)
{
# We have a different mapping for this in the EUC_JP to UTF-8 direction.
if ($i->{code} == 0x2243)
{
@@ -48,13 +50,14 @@ foreach my $i (@$jis0212) {
# Load CP932.TXT.
my $ct932 = &read_source("CP932.TXT");
foreach my $i (@$ct932)
{
my $sjis = $i->{code};
# We have a different mapping for this in the EUC_JP to UTF-8 direction.
if ( $sjis == 0xeefa
|| $sjis == 0xeefb
|| $sjis == 0xeefc)
{
next;
}
@@ -63,8 +66,10 @@ foreach my $i (@$ct932) {
{
my $jis = &sjis2jis($sjis);
$i->{code} = $jis | (
$jis < 0x100
? 0x8e00
: ($sjis >= 0xeffd ? 0x8f8080 : 0x8080));
# Remember the SJIS code for later.
$i->{sjis} = $sjis;
@@ -73,13 +78,14 @@ foreach my $i (@$ct932) {
}
}
foreach my $i (@mapping)
{
my $sjis = $i->{sjis};
# These SJIS characters are excluded completely.
if ( $sjis >= 0xed00 && $sjis <= 0xeef9
|| $sjis >= 0xfa54 && $sjis <= 0xfa56
|| $sjis >= 0xfa58 && $sjis <= 0xfc4b)
{
$i->{direction} = NONE;
next;
@@ -92,10 +98,16 @@ foreach my $i (@mapping) {
next;
}
if ( $sjis == 0x8790
|| $sjis == 0x8791
|| $sjis == 0x8792
|| $sjis == 0x8795
|| $sjis == 0x8796
|| $sjis == 0x8797
|| $sjis == 0x879a
|| $sjis == 0x879b
|| $sjis == 0x879c
|| ($sjis >= 0xfa4a && $sjis <= 0xfa53))
{
$i->{direction} = TO_UNICODE;
next;
@@ -103,95 +115,352 @@ foreach my $i (@mapping) {
}
push @mapping, (
{ direction => BOTH,
ucs => 0x4efc,
code => 0x8ff4af,
comment => '# CJK(4EFC)' },
{ direction => BOTH,
ucs => 0x50f4,
code => 0x8ff4b0,
comment => '# CJK(50F4)' },
{ direction => BOTH,
ucs => 0x51EC,
code => 0x8ff4b1,
comment => '# CJK(51EC)' },
{ direction => BOTH,
ucs => 0x5307,
code => 0x8ff4b2,
comment => '# CJK(5307)' },
{ direction => BOTH,
ucs => 0x5324,
code => 0x8ff4b3,
comment => '# CJK(5324)' },
{ direction => BOTH,
ucs => 0x548A,
code => 0x8ff4b5,
comment => '# CJK(548A)' },
{ direction => BOTH,
ucs => 0x5759,
code => 0x8ff4b6,
comment => '# CJK(5759)' },
{ direction => BOTH,
ucs => 0x589E,
code => 0x8ff4b9,
comment => '# CJK(589E)' },
{ direction => BOTH,
ucs => 0x5BEC,
code => 0x8ff4ba,
comment => '# CJK(5BEC)' },
{ direction => BOTH,
ucs => 0x5CF5,
code => 0x8ff4bb,
comment => '# CJK(5CF5)' },
{ direction => BOTH,
ucs => 0x5D53,
code => 0x8ff4bc,
comment => '# CJK(5D53)' },
{ direction => BOTH,
ucs => 0x5FB7,
code => 0x8ff4be,
comment => '# CJK(5FB7)' },
{ direction => BOTH,
ucs => 0x6085,
code => 0x8ff4bf,
comment => '# CJK(6085)' },
{ direction => BOTH,
ucs => 0x6120,
code => 0x8ff4c0,
comment => '# CJK(6120)' },
{ direction => BOTH,
ucs => 0x654E,
code => 0x8ff4c1,
comment => '# CJK(654E)' },
{ direction => BOTH,
ucs => 0x663B,
code => 0x8ff4c2,
comment => '# CJK(663B)' },
{ direction => BOTH,
ucs => 0x6665,
code => 0x8ff4c3,
comment => '# CJK(6665)' },
{ direction => BOTH,
ucs => 0x6801,
code => 0x8ff4c6,
comment => '# CJK(6801)' },
{ direction => BOTH,
ucs => 0x6A6B,
code => 0x8ff4c9,
comment => '# CJK(6A6B)' },
{ direction => BOTH,
ucs => 0x6AE2,
code => 0x8ff4ca,
comment => '# CJK(6AE2)' },
{ direction => BOTH,
ucs => 0x6DF2,
code => 0x8ff4cc,
comment => '# CJK(6DF2)' },
{ direction => BOTH,
ucs => 0x6DF8,
code => 0x8ff4cb,
comment => '# CJK(6DF8)' },
{ direction => BOTH,
ucs => 0x7028,
code => 0x8ff4cd,
comment => '# CJK(7028)' },
{ direction => BOTH,
ucs => 0x70BB,
code => 0x8ff4ae,
comment => '# CJK(70BB)' },
{ direction => BOTH,
ucs => 0x7501,
code => 0x8ff4d0,
comment => '# CJK(7501)' },
{ direction => BOTH,
ucs => 0x7682,
code => 0x8ff4d1,
comment => '# CJK(7682)' },
{ direction => BOTH,
ucs => 0x769E,
code => 0x8ff4d2,
comment => '# CJK(769E)' },
{ direction => BOTH,
ucs => 0x7930,
code => 0x8ff4d4,
comment => '# CJK(7930)' },
{ direction => BOTH,
ucs => 0x7AE7,
code => 0x8ff4d9,
comment => '# CJK(7AE7)' },
{ direction => BOTH,
ucs => 0x7DA0,
code => 0x8ff4dc,
comment => '# CJK(7DA0)' },
{ direction => BOTH,
ucs => 0x7DD6,
code => 0x8ff4dd,
comment => '# CJK(7DD6)' },
{ direction => BOTH,
ucs => 0x8362,
code => 0x8ff4df,
comment => '# CJK(8362)' },
{ direction => BOTH,
ucs => 0x85B0,
code => 0x8ff4e1,
comment => '# CJK(85B0)' },
{ direction => BOTH,
ucs => 0x8807,
code => 0x8ff4e4,
comment => '# CJK(8807)' },
{ direction => BOTH,
ucs => 0x8B7F,
code => 0x8ff4e6,
comment => '# CJK(8B7F)' },
{ direction => BOTH,
ucs => 0x8CF4,
code => 0x8ff4e7,
comment => '# CJK(8CF4)' },
{ direction => BOTH,
ucs => 0x8D76,
code => 0x8ff4e8,
comment => '# CJK(8D76)' },
{ direction => BOTH,
ucs => 0x90DE,
code => 0x8ff4ec,
comment => '# CJK(90DE)' },
{ direction => BOTH,
ucs => 0x9115,
code => 0x8ff4ee,
comment => '# CJK(9115)' },
{ direction => BOTH,
ucs => 0x9592,
code => 0x8ff4f1,
comment => '# CJK(9592)' },
{ direction => BOTH,
ucs => 0x973B,
code => 0x8ff4f4,
comment => '# CJK(973B)' },
{ direction => BOTH,
ucs => 0x974D,
code => 0x8ff4f5,
comment => '# CJK(974D)' },
{ direction => BOTH,
ucs => 0x9751,
code => 0x8ff4f6,
comment => '# CJK(9751)' },
{ direction => BOTH,
ucs => 0x999E,
code => 0x8ff4fa,
comment => '# CJK(999E)' },
{ direction => BOTH,
ucs => 0x9AD9,
code => 0x8ff4fb,
comment => '# CJK(9AD9)' },
{ direction => BOTH,
ucs => 0x9B72,
code => 0x8ff4fc,
comment => '# CJK(9B72)' },
{ direction => BOTH,
ucs => 0x9ED1,
code => 0x8ff4fe,
comment => '# CJK(9ED1)' },
{ direction => BOTH,
ucs => 0xF929,
code => 0x8ff4c5,
comment => '# CJK COMPATIBILITY IDEOGRAPH-F929' },
{ direction => BOTH,
ucs => 0xF9DC,
code => 0x8ff4f2,
comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC' },
{ direction => BOTH,
ucs => 0xFA0E,
code => 0x8ff4b4,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E' },
{ direction => BOTH,
ucs => 0xFA0F,
code => 0x8ff4b7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F' },
{ direction => BOTH,
ucs => 0xFA10,
code => 0x8ff4b8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10' },
{ direction => BOTH,
ucs => 0xFA11,
code => 0x8ff4bd,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11' },
{ direction => BOTH,
ucs => 0xFA12,
code => 0x8ff4c4,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12' },
{ direction => BOTH,
ucs => 0xFA13,
code => 0x8ff4c7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13' },
{ direction => BOTH,
ucs => 0xFA14,
code => 0x8ff4c8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14' },
{ direction => BOTH,
ucs => 0xFA15,
code => 0x8ff4ce,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15' },
{ direction => BOTH,
ucs => 0xFA16,
code => 0x8ff4cf,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16' },
{ direction => BOTH,
ucs => 0xFA17,
code => 0x8ff4d3,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17' },
{ direction => BOTH,
ucs => 0xFA18,
code => 0x8ff4d5,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18' },
{ direction => BOTH,
ucs => 0xFA19,
code => 0x8ff4d6,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19' },
{ direction => BOTH,
ucs => 0xFA1A,
code => 0x8ff4d7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A' },
{ direction => BOTH,
ucs => 0xFA1B,
code => 0x8ff4d8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B' },
{ direction => BOTH,
ucs => 0xFA1C,
code => 0x8ff4da,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C' },
{ direction => BOTH,
ucs => 0xFA1D,
code => 0x8ff4db,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D' },
{ direction => BOTH,
ucs => 0xFA1E,
code => 0x8ff4de,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E' },
{ direction => BOTH,
ucs => 0xFA1F,
code => 0x8ff4e0,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F' },
{ direction => BOTH,
ucs => 0xFA20,
code => 0x8ff4e2,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20' },
{ direction => BOTH,
ucs => 0xFA21,
code => 0x8ff4e3,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21' },
{ direction => BOTH,
ucs => 0xFA22,
code => 0x8ff4e5,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22' },
{ direction => BOTH,
ucs => 0xFA23,
code => 0x8ff4e9,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23' },
{ direction => BOTH,
ucs => 0xFA24,
code => 0x8ff4ea,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24' },
{ direction => BOTH,
ucs => 0xFA25,
code => 0x8ff4eb,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25' },
{ direction => BOTH,
ucs => 0xFA26,
code => 0x8ff4ed,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26' },
{ direction => BOTH,
ucs => 0xFA27,
code => 0x8ff4ef,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27' },
{ direction => BOTH,
ucs => 0xFA28,
code => 0x8ff4f0,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28' },
{ direction => BOTH,
ucs => 0xFA29,
code => 0x8ff4f3,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29' },
{ direction => BOTH,
ucs => 0xFA2A,
code => 0x8ff4f7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A' },
{ direction => BOTH,
ucs => 0xFA2B,
code => 0x8ff4f8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B' },
{ direction => BOTH,
ucs => 0xFA2C,
code => 0x8ff4f9,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C' },
{ direction => BOTH,
ucs => 0xFA2D,
code => 0x8ff4fd,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D' },
{ direction => BOTH,
ucs => 0xFF07,
code => 0x8ff4a9,
comment => '# FULLWIDTH APOSTROPHE' },
{ direction => BOTH,
ucs => 0xFFE4,
code => 0x8fa2c3,
comment => '# FULLWIDTH BROKEN BAR' },
# additional conversions for EUC_JP -> UTF-8 conversion
{ direction => TO_UNICODE,
ucs => 0x2116,
code => 0x8ff4ac,
comment => '# NUMERO SIGN' },
{ direction => TO_UNICODE,
ucs => 0x2121,
code => 0x8ff4ad,
comment => '# TELEPHONE SIGN' },
{ direction => TO_UNICODE,
ucs => 0x3231,
code => 0x8ff4ab,
comment => '# PARENTHESIZED IDEOGRAPH STOCK' });
print_conversion_tables($this_script, "EUC_JP", \@mapping);
@@ -215,6 +484,7 @@ sub sjis2jis
if ($pos >= 114 * 0x5e && $pos <= 115 * 0x5e + 0x1b)
{
# This region (115-ku) is out of range of JIS code but for
# convenient to generate code in EUC CODESET 3, move this to
# seemingly duplicate region (83-84-ku).


@@ -31,10 +31,24 @@ foreach my $i (@$mapping)
}
# Some extra characters that are not in KSX1001.TXT
push @$mapping,
( { direction => BOTH,
ucs => 0x20AC,
code => 0xa2e6,
comment => '# EURO SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x00AE,
code => 0xa2e7,
comment => '# REGISTERED SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x327E,
code => 0xa2e8,
comment => '# CIRCLED HANGUL IEUNG U',
f => $this_script,
l => __LINE__ });
print_conversion_tables($this_script, "EUC_KR", $mapping);


@@ -52,14 +52,13 @@ foreach my $i (@$mapping)
# Some codes are mapped twice in the EUC_TW to UTF-8 table.
if ($origcode >= 0x12121 && $origcode <= 0x20000)
{
push @extras,
{ ucs => $i->{ucs},
code => ($i->{code} + 0x8ea10000),
rest => $i->{rest},
direction => TO_UNICODE,
f => $i->{f},
l => $i->{l} };
}
}


@@ -35,13 +35,12 @@ while (<$in>)
my $code = hex($c);
if ($code >= 0x80 && $ucs >= 0x0080)
{
push @mapping,
{ ucs => $ucs,
code => $code,
direction => BOTH,
f => $in_file,
l => $. };
}
}
close($in);


@@ -25,10 +25,24 @@ my $this_script = $0;
my $mapping = &read_source("JOHAB.TXT");
# Some extra characters that are not in JOHAB.TXT
push @$mapping,
( { direction => BOTH,
ucs => 0x20AC,
code => 0xd9e6,
comment => '# EURO SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x00AE,
code => 0xd9e7,
comment => '# REGISTERED SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x327E,
code => 0xd9e8,
comment => '# CIRCLED HANGUL IEUNG U',
f => $this_script,
l => __LINE__ });
print_conversion_tables($this_script, "JOHAB", $mapping);


@@ -24,6 +24,7 @@ while (my $line = <$in>)
{
if ($line =~ /^0x(.*)[ \t]*U\+(.*)\+(.*)[ \t]*#(.*)$/)
{
# combined characters
my ($c, $u1, $u2) = ($1, $2, $3);
my $rest = "U+" . $u1 . "+" . $u2 . $4;
@@ -31,18 +32,18 @@ while (my $line = <$in>)
my $ucs1 = hex($u1);
my $ucs2 = hex($u2);
push @mapping,
{ code => $code,
ucs => $ucs1,
ucs_second => $ucs2,
comment => $rest,
direction => BOTH,
f => $in_file,
l => $. };
}
elsif ($line =~ /^0x(.*)[ \t]*U\+(.*)[ \t]*#(.*)$/)
{
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
my $ucs = hex($u);
@@ -66,14 +67,13 @@ while (my $line = <$in>)
$direction = BOTH;
}
push @mapping,
{ code => $code,
ucs => $ucs,
comment => $rest,
direction => $direction,
f => $in_file,
l => $. };
}
}
close($in);


@@ -20,9 +20,8 @@ my $mapping = read_source("CP932.TXT");
# Drop these SJIS codes from the source for UTF8=>SJIS conversion
my @reject_sjis = (
0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782,
0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
0x8795 .. 0x8797, 0x879a .. 0x879c);
foreach my $i (@$mapping)
{
@@ -36,15 +35,54 @@ foreach my $i (@$mapping)
}
# Add these UTF8->SJIS pairs to the table.
push @$mapping,
( { direction => FROM_UNICODE,
ucs => 0x00a2,
code => 0x8191,
comment => '# CENT SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x00a3,
code => 0x8192,
comment => '# POUND SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x00a5,
code => 0x5c,
comment => '# YEN SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x00ac,
code => 0x81ca,
comment => '# NOT SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x2016,
code => 0x8161,
comment => '# DOUBLE VERTICAL LINE',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x203e,
code => 0x7e,
comment => '# OVERLINE',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x2212,
code => 0x817c,
comment => '# MINUS SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x301c,
code => 0x8160,
comment => '# WAVE DASH',
f => $this_script,
l => __LINE__ });
print_conversion_tables($this_script, "SJIS", $mapping);


@@ -38,18 +38,23 @@ while (<$in>)
if ($code >= 0x80 && $ucs >= 0x0080)
{
push @mapping,
{ ucs => $ucs,
code => $code,
direction => BOTH,
f => $in_file,
l => $. };
}
}
close($in);
# One extra character that's not in the source file.
push @mapping,
{ direction => BOTH,
code => 0xa2e8,
ucs => 0x327e,
comment => 'CIRCLED HANGUL IEUNG U',
f => $this_script,
l => __LINE__ };
print_conversion_tables($this_script, "UHC", \@mapping);


@@ -9,15 +9,15 @@ use strict;
use Exporter 'import';
our @EXPORT =
qw( NONE TO_UNICODE FROM_UNICODE BOTH read_source print_conversion_tables);
# Constants used in the 'direction' field of the character maps
use constant {
NONE => 0,
TO_UNICODE => 1,
FROM_UNICODE => 2,
BOTH => 3 };
#######################################################################
# read_source - common routine to read source file
@@ -49,13 +49,13 @@ sub read_source
print STDERR "READ ERROR at line $. in $fname: $_\n";
exit;
}
my $out = {
code => hex($1),
ucs => hex($2),
comment => $4,
direction => BOTH,
f => $fname,
l => $. };
# Ignore pure ASCII mappings. PostgreSQL character conversion code
# never even passes these to the conversion code.
@@ -92,8 +92,10 @@ sub print_conversion_tables
{
my ($this_script, $csname, $charset) = @_;
print_conversion_tables_direction($this_script, $csname, FROM_UNICODE,
$charset);
print_conversion_tables_direction($this_script, $csname, TO_UNICODE,
$charset);
}
#############################################################################
@@ -146,13 +148,11 @@ sub print_conversion_tables_direction
{
if ($direction == TO_UNICODE)
{
print_to_utf8_combined_map($out, $csname, $charmap_combined, 1);
}
else
{
print_from_utf8_combined_map($out, $csname, $charmap_combined, 1);
}
}
@@ -166,14 +166,16 @@ sub print_from_utf8_combined_map
my $last_comment = "";
printf $out "\n/* Combined character map */\n";
printf $out
"static const pg_utf_to_local_combined ULmap${charset}_combined[ %d ] = {",
scalar(@$table);
my $first = 1;
foreach my $i (sort { $a->{utf8} <=> $b->{utf8} } @$table)
{
print($out ",") if (!$first);
$first = 0;
print $out "\t/* $last_comment */"
if ($verbose && $last_comment ne "");
printf $out "\n {0x%08x, 0x%08x, 0x%04x}",
$i->{utf8}, $i->{utf8_second}, $i->{code};
@@ -198,7 +200,8 @@ sub print_to_utf8_combined_map
my $last_comment = "";
printf $out "\n/* Combined character map */\n";
printf $out
"static const pg_local_to_utf_combined LUmap${charset}_combined[ %d ] = {",
scalar(@$table);
my $first = 1;
@@ -206,7 +209,8 @@ sub print_to_utf8_combined_map
{
print($out ",") if (!$first);
$first = 0;
print $out "\t/* $last_comment */"
if ($verbose && $last_comment ne "");
printf $out "\n {0x%04x, 0x%08x, 0x%08x}",
$i->{code}, $i->{utf8}, $i->{utf8_second};
@@ -309,10 +313,14 @@ sub print_radix_table
###
# Add the segments for the radix trees themselves.
push @segments,
build_segments_from_tree("Single byte table", "1-byte", 1, \%b1map);
push @segments,
build_segments_from_tree("Two byte table", "2-byte", 2, \%b2map);
push @segments,
build_segments_from_tree("Three byte table", "3-byte", 3, \%b3map);
push @segments,
build_segments_from_tree("Four byte table", "4-byte", 4, \%b4map);
###
### Find min and max index used in each level of each tree.
@@ -337,6 +345,7 @@ sub print_radix_table
$min_idx{ $seg->{depth} }{ $seg->{level} } = $this_min;
$max_idx{ $seg->{depth} }{ $seg->{level} } = $this_max;
}
# Copy the mins and max's back to every segment, for convenience.
foreach my $seg (@segments)
{
@@ -359,11 +368,10 @@ sub print_radix_table
$widest_range = $this_range if ($this_range > $widest_range);
}
unshift @segments,
{ header => "Dummy map, for invalid values",
min_idx => 0,
max_idx => $widest_range };
###
### Eliminate overlapping zeros
@@ -383,21 +391,29 @@ sub print_radix_table
# Count the number of zero values at the end of this segment.
my $this_trail_zeros = 0;
for (
my $i = $seg->{max_idx};
$i >= $seg->{min_idx} && !$seg->{values}->{$i};
$i--)
{
$this_trail_zeros++;
}
# Count the number of zeros at the beginning of next segment.
my $next_lead_zeros = 0;
for (
my $i = $nextseg->{min_idx};
$i <= $nextseg->{max_idx} && !$nextseg->{values}->{$i};
$i++)
{
$next_lead_zeros++;
}
# How many zeros in common?
my $overlaid_trail_zeros =
($this_trail_zeros > $next_lead_zeros)
? $next_lead_zeros
: $this_trail_zeros;
$seg->{overlaid_trail_zeros} = $overlaid_trail_zeros;
$seg->{max_idx} = $seg->{max_idx} - $overlaid_trail_zeros;
@@ -529,17 +545,20 @@ sub print_radix_table
print $out " ${tblname}_table,\n";
}
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 1-byte inputs */\n",
$b1root;
printf $out " 0x%02x, /* b1_lower */\n", $b1_lower;
printf $out " 0x%02x, /* b1_upper */\n", $b1_upper;
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 2-byte inputs */\n",
$b2root;
printf $out " 0x%02x, /* b2_1_lower */\n", $b2_1_lower;
printf $out " 0x%02x, /* b2_1_upper */\n", $b2_1_upper;
printf $out " 0x%02x, /* b2_2_lower */\n", $b2_2_lower;
printf $out " 0x%02x, /* b2_2_upper */\n", $b2_2_upper;
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n",
$b3root;
printf $out " 0x%02x, /* b3_1_lower */\n", $b3_1_lower;
printf $out " 0x%02x, /* b3_1_upper */\n", $b3_1_upper;
printf $out " 0x%02x, /* b3_2_lower */\n", $b3_2_lower;
@@ -547,7 +566,8 @@ sub print_radix_table
printf $out " 0x%02x, /* b3_3_lower */\n", $b3_3_lower;
printf $out " 0x%02x, /* b3_3_upper */\n", $b3_3_upper;
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n",
$b4root;
printf $out " 0x%02x, /* b4_1_lower */\n", $b4_1_lower;
printf $out " 0x%02x, /* b4_1_upper */\n", $b4_1_upper;
printf $out " 0x%02x, /* b4_2_lower */\n", $b4_2_lower;
@@ -561,6 +581,7 @@ sub print_radix_table
print $out "static const $datatype ${tblname}_table[$tblsize] =\n";
print $out "{";
my $off = 0;
foreach my $seg (@segments)
{
printf $out "\n";
@@ -569,10 +590,12 @@ sub print_radix_table
for (my $i = $seg->{min_idx}; $i <= $seg->{max_idx};)
{
# Print the next line's worth of values.
# XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i;
for (my $j = 0;
$j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
{
my $val = $seg->{values}->{$i};
@@ -588,7 +611,8 @@ sub print_radix_table
}
if ($seg->{overlaid_trail_zeros})
{
printf $out
" /* $seg->{overlaid_trail_zeros} trailing zero values shared with next segment */\n";
}
}
@@ -607,12 +631,13 @@ sub build_segments_from_tree
if (%{$map})
{
@segments =
build_segments_recurse($header, $rootlabel, "", 1, $depth, $map);
# Sort the segments into "breadth-first" order. Not strictly required,
# but makes the maps nicer to read.
@segments =
sort { $a->{level} cmp $b->{level} or $a->{path} cmp $b->{path} }
@segments;
}
@@ -628,14 +653,13 @@ sub build_segments_recurse
if ($level == $depth)
{
push @segments,
{ header => $header . ", leaf: ${path}xx",
label => $label,
level => $level,
depth => $depth,
path => $path,
values => $map };
}
else
{
@@ -646,19 +670,19 @@ sub build_segments_recurse
my $childpath = $path . sprintf("%02x", $i);
my $childlabel = "$depth-level-$level-$childpath";
push @segments,
build_segments_recurse($header, $childlabel, $childpath,
$level + 1, $depth, $val);
$children{$i} = $childlabel;
}
push @segments,
{ header => $header . ", byte #$level: ${path}xx",
label => $label,
level => $level,
depth => $depth,
path => $path,
values => \%children };
}
return @segments;
}
@@ -688,6 +712,7 @@ sub make_charmap
my %charmap;
foreach my $c (@$charset)
{
# combined characters are handled elsewhere
next if (defined $c->{ucs_second});
@@ -710,7 +735,8 @@ sub make_charmap
if ($verbose)
{
printf $out "0x%04x 0x%04x %s:%d %s\n", $src, $dst, $c->{f},
$c->{l}, $c->{comment};
}
}
if ($verbose)
@@ -743,11 +769,13 @@ sub make_charmap_combined
if (defined $c->{ucs_second})
{
my $entry = {
utf8 => ucs2utf($c->{ucs}),
utf8_second => ucs2utf($c->{ucs_second}),
code => $c->{code},
comment => $c->{comment},
f => $c->{f},
l => $c->{l} };
push @combined, $entry;
}
}


@@ -38,6 +38,7 @@ mkdir $datadir;
# make sure we run one successful test without a TZ setting so we test
# initdb's time zone setting code
{
# delete local only works from perl 5.12, so use the older way to do this
local (%ENV) = %ENV;
delete $ENV{TZ};


@@ -39,8 +39,7 @@ $node->command_fails(
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
@@ -53,7 +52,9 @@ close $conf;
$node->restart;
# Write some files to test that they are not copied.
foreach my $filename (
qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp)
)
{
open my $file, '>>', "$pgdata/$filename";
print $file "DONOTCOPY";
@@ -71,7 +72,9 @@ is_deeply(
'no WAL files copied');
# Contents of these directories should not be copied.
foreach my $dirname (
qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans)
)
{
is_deeply(
[ sort(slurp_dir("$tempdir/backup/$dirname/")) ],
@@ -80,14 +83,16 @@ foreach my $dirname (qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots
}
# These files should not be copied.
foreach my $filename (
qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp)
)
{
ok(!-f "$tempdir/backup/$filename", "$filename not copied");
}
# Make sure existing backup_label was ignored.
isnt(slurp_file("$tempdir/backup/backup_label"),
'DONOTCOPY', 'existing backup_label not copied');
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backup2", '--waldir',
@@ -124,7 +129,8 @@ $node->command_fails(
my $superlongname = "superlongname_" . ("x" x 100);
my $superlongpath = "$pgdata/$superlongname";
open my $file, '>', "$superlongpath"
or die "unable to create file $superlongpath";
close $file;
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '-Ft' ],
@@ -183,7 +189,8 @@ SKIP:
"tablespace symlink was updated");
closedir $dh;
ok( -d "$tempdir/backup1/pg_replslot",
'pg_replslot symlink copied as directory');
mkdir "$tempdir/tbl=spc2";
$node->safe_psql('postgres', "DROP TABLE test1;");
@@ -222,7 +229,8 @@ like(
qr/^primary_conninfo = '.*port=$port.*'\n/m,
'recovery.conf sets primary_conninfo');
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupxd" ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
@@ -242,7 +250,9 @@ $node->command_ok(
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
$node->command_ok(
[ 'pg_basebackup', '-D',
"$tempdir/backupnoslot", '-X',
'stream', '--no-slot' ],
'pg_basebackup -X stream runs with --no-slot');
$node->command_fails(


@@ -12,7 +12,8 @@ my $node = get_new_node('main');
# Initialize node without replication settings
$node->init(allows_streaming => 1, has_archiving => 1);
$node->append_conf(
'postgresql.conf', q{
wal_level = 'logical'
max_replication_slots = 4
max_wal_senders = 4
@@ -22,25 +23,34 @@ log_error_verbosity = verbose
$node->dump_info;
$node->start;
$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
$node->command_fails([ 'pg_recvlogical', '-S', 'test' ],
'pg_recvlogical needs a database');
$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
'pg_recvlogical needs an action');
$node->command_fails(
[ 'pg_recvlogical', '-S',
'test', '-d',
$node->connstr('postgres'), '--start' ],
'no destination file');
$node->command_ok(
[ 'pg_recvlogical', '-S',
'test', '-d',
$node->connstr('postgres'), '--create-slot' ],
'slot created');
my $slot = $node->slot('test');
isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
$node->psql('postgres', 'CREATE TABLE test_table(x integer)');
$node->psql('postgres',
'INSERT INTO test_table(x) SELECT y FROM generate_series(1, 10) a(y);');
my $nextlsn =
$node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
chomp($nextlsn);
$node->command_ok(
[ 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
'--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-' ],
'replayed a transaction');


@@ -32,8 +32,7 @@ else
print $conf "listen_addresses = '127.0.0.1'\n";
}
close $conf;
command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ], 'pg_ctl start');
# sleep here is because Windows builds can't check postmaster.pid exactly,
# so they may mistake a pre-existing postmaster.pid for one created by the
@@ -42,12 +41,12 @@ command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
sleep 3 if ($windows_os);
command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
'second pg_ctl start fails');
command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop');
command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
'second pg_ctl stop fails');
command_ok(
[ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
'pg_ctl restart with server not running');
command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
'pg_ctl restart with server running');
View File
@ -7,26 +7,30 @@ use Test::More tests => 12;
my $tempdir = TestLib::tempdir; my $tempdir = TestLib::tempdir;
command_fails_like([ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ], command_fails_like(
[ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
qr/directory .* does not exist/, qr/directory .* does not exist/,
'pg_ctl promote with nonexistent directory'); 'pg_ctl promote with nonexistent directory');
my $node_primary = get_new_node('primary'); my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1); $node_primary->init(allows_streaming => 1);
command_fails_like([ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ], command_fails_like(
[ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
qr/PID file .* does not exist/, qr/PID file .* does not exist/,
'pg_ctl promote of not running instance fails'); 'pg_ctl promote of not running instance fails');
$node_primary->start; $node_primary->start;
command_fails_like([ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ], command_fails_like(
[ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
qr/not in standby mode/, qr/not in standby mode/,
'pg_ctl promote of primary instance fails'); 'pg_ctl promote of primary instance fails');
my $node_standby = get_new_node('standby'); my $node_standby = get_new_node('standby');
$node_primary->backup('my_backup'); $node_primary->backup('my_backup');
$node_standby->init_from_backup($node_primary, 'my_backup', has_streaming => 1); $node_standby->init_from_backup($node_primary, 'my_backup',
has_streaming => 1);
$node_standby->start; $node_standby->start;
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'), is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
@ -35,12 +39,14 @@ is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ], command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ],
'pg_ctl -W promote of standby runs'); 'pg_ctl -W promote of standby runs');
ok($node_standby->poll_query_until('postgres', 'SELECT NOT pg_is_in_recovery()'), ok( $node_standby->poll_query_until(
'postgres', 'SELECT NOT pg_is_in_recovery()'),
'promoted standby is not in recovery'); 'promoted standby is not in recovery');
# same again with default wait option # same again with default wait option
$node_standby = get_new_node('standby2'); $node_standby = get_new_node('standby2');
$node_standby->init_from_backup($node_primary, 'my_backup', has_streaming => 1); $node_standby->init_from_backup($node_primary, 'my_backup',
has_streaming => 1);
$node_standby->start; $node_standby->start;
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'), is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
View File
@ -48,8 +48,7 @@ my %pgdump_runs = (
'-d', 'postgres', # alternative way to specify database '-d', 'postgres', # alternative way to specify database
], ],
restore_cmd => [ restore_cmd => [
'pg_restore', '-Fc', 'pg_restore', '-Fc', '--verbose',
'--verbose',
"--file=$tempdir/binary_upgrade.sql", "--file=$tempdir/binary_upgrade.sql",
"$tempdir/binary_upgrade.dump", ], }, "$tempdir/binary_upgrade.dump", ], },
clean => { clean => {
@ -71,12 +70,9 @@ my %pgdump_runs = (
'postgres', ], }, 'postgres', ], },
column_inserts => { column_inserts => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/column_inserts.sql", '-a',
"--file=$tempdir/column_inserts.sql", '--column-inserts', 'postgres', ], },
'-a',
'--column-inserts',
'postgres', ], },
createdb => { createdb => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump',
@ -98,11 +94,10 @@ my %pgdump_runs = (
'postgres', ], }, 'postgres', ], },
defaults => { defaults => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', '-f', "$tempdir/defaults.sql",
'-f',
"$tempdir/defaults.sql",
'postgres', ], }, 'postgres', ], },
# Do not use --no-sync to give test coverage for data sync. # Do not use --no-sync to give test coverage for data sync.
defaults_custom_format => { defaults_custom_format => {
test_key => 'defaults', test_key => 'defaults',
@ -113,6 +108,7 @@ my %pgdump_runs = (
'pg_restore', '-Fc', 'pg_restore', '-Fc',
"--file=$tempdir/defaults_custom_format.sql", "--file=$tempdir/defaults_custom_format.sql",
"$tempdir/defaults_custom_format.dump", ], }, "$tempdir/defaults_custom_format.dump", ], },
# Do not use --no-sync to give test coverage for data sync. # Do not use --no-sync to give test coverage for data sync.
defaults_dir_format => { defaults_dir_format => {
test_key => 'defaults', test_key => 'defaults',
@ -123,6 +119,7 @@ my %pgdump_runs = (
'pg_restore', '-Fd', 'pg_restore', '-Fd',
"--file=$tempdir/defaults_dir_format.sql", "--file=$tempdir/defaults_dir_format.sql",
"$tempdir/defaults_dir_format", ], }, "$tempdir/defaults_dir_format", ], },
# Do not use --no-sync to give test coverage for data sync. # Do not use --no-sync to give test coverage for data sync.
defaults_parallel => { defaults_parallel => {
test_key => 'defaults', test_key => 'defaults',
@ -133,6 +130,7 @@ my %pgdump_runs = (
'pg_restore', 'pg_restore',
"--file=$tempdir/defaults_parallel.sql", "--file=$tempdir/defaults_parallel.sql",
"$tempdir/defaults_parallel", ], }, "$tempdir/defaults_parallel", ], },
# Do not use --no-sync to give test coverage for data sync. # Do not use --no-sync to give test coverage for data sync.
defaults_tar_format => { defaults_tar_format => {
test_key => 'defaults', test_key => 'defaults',
@ -146,18 +144,14 @@ my %pgdump_runs = (
"$tempdir/defaults_tar_format.tar", ], }, "$tempdir/defaults_tar_format.tar", ], },
exclude_dump_test_schema => { exclude_dump_test_schema => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync',
"--file=$tempdir/exclude_dump_test_schema.sql", "--file=$tempdir/exclude_dump_test_schema.sql",
'--exclude-schema=dump_test', '--exclude-schema=dump_test', 'postgres', ], },
'postgres', ], },
exclude_test_table => { exclude_test_table => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync',
"--file=$tempdir/exclude_test_table.sql", "--file=$tempdir/exclude_test_table.sql",
'--exclude-table=dump_test.test_table', '--exclude-table=dump_test.test_table', 'postgres', ], },
'postgres', ], },
exclude_test_table_data => { exclude_test_table_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump',
@ -168,49 +162,36 @@ my %pgdump_runs = (
'postgres', ], }, 'postgres', ], },
pg_dumpall_globals => { pg_dumpall_globals => {
dump_cmd => [ dump_cmd => [
'pg_dumpall', '-v', 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_globals.sql",
"--file=$tempdir/pg_dumpall_globals.sql", '-g', '-g', '--no-sync', ], },
'--no-sync', ], },
pg_dumpall_globals_clean => { pg_dumpall_globals_clean => {
dump_cmd => [ dump_cmd => [
'pg_dumpall', 'pg_dumpall', "--file=$tempdir/pg_dumpall_globals_clean.sql",
"--file=$tempdir/pg_dumpall_globals_clean.sql", '-g', '-c', '--no-sync', ], },
'-g',
'-c',
'--no-sync', ], },
pg_dumpall_dbprivs => { pg_dumpall_dbprivs => {
dump_cmd => [ dump_cmd => [
'pg_dumpall', 'pg_dumpall', '--no-sync',
'--no-sync',
"--file=$tempdir/pg_dumpall_dbprivs.sql", ], }, "--file=$tempdir/pg_dumpall_dbprivs.sql", ], },
no_blobs => { no_blobs => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/no_blobs.sql", '-B',
"--file=$tempdir/no_blobs.sql",
'-B',
'postgres', ], }, 'postgres', ], },
no_privs => { no_privs => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/no_privs.sql", '-x',
"--file=$tempdir/no_privs.sql",
'-x',
'postgres', ], }, 'postgres', ], },
no_owner => { no_owner => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/no_owner.sql", '-O',
"--file=$tempdir/no_owner.sql",
'-O',
'postgres', ], }, 'postgres', ], },
only_dump_test_schema => { only_dump_test_schema => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync',
"--file=$tempdir/only_dump_test_schema.sql", "--file=$tempdir/only_dump_test_schema.sql",
'--schema=dump_test', '--schema=dump_test', 'postgres', ], },
'postgres', ], },
only_dump_test_table => { only_dump_test_table => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump',
@ -243,16 +224,19 @@ my %pgdump_runs = (
"$tempdir/role_parallel", ], }, "$tempdir/role_parallel", ], },
schema_only => { schema_only => {
dump_cmd => [ dump_cmd => [
'pg_dump', '--format=plain', "--file=$tempdir/schema_only.sql", 'pg_dump', '--format=plain',
'--no-sync', '-s', 'postgres', ], }, "--file=$tempdir/schema_only.sql", '--no-sync',
'-s', 'postgres', ], },
section_pre_data => { section_pre_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', "--file=$tempdir/section_pre_data.sql", 'pg_dump', "--file=$tempdir/section_pre_data.sql",
'--section=pre-data', '--no-sync', 'postgres', ], }, '--section=pre-data', '--no-sync',
'postgres', ], },
section_data => { section_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', "--file=$tempdir/section_data.sql", 'pg_dump', "--file=$tempdir/section_data.sql",
'--section=data', '--no-sync', 'postgres', ], }, '--section=data', '--no-sync',
'postgres', ], },
section_post_data => { section_post_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', "--file=$tempdir/section_post_data.sql", 'pg_dump', "--file=$tempdir/section_post_data.sql",
@ -265,8 +249,8 @@ my %pgdump_runs = (
with_oids => { with_oids => {
dump_cmd => [ dump_cmd => [
'pg_dump', '--oids', 'pg_dump', '--oids',
'--no-sync', '--no-sync', "--file=$tempdir/with_oids.sql",
"--file=$tempdir/with_oids.sql", 'postgres', ], },); 'postgres', ], },);
############################################################### ###############################################################
# Definition of the tests to run. # Definition of the tests to run.
@ -604,7 +588,8 @@ my %tests = (
'ALTER OPERATOR FAMILY dump_test.op_family USING btree' => { 'ALTER OPERATOR FAMILY dump_test.op_family USING btree' => {
all_runs => 1, all_runs => 1,
create_order => 75, create_order => 75,
create_sql => 'ALTER OPERATOR FAMILY dump_test.op_family USING btree ADD create_sql =>
'ALTER OPERATOR FAMILY dump_test.op_family USING btree ADD
OPERATOR 1 <(bigint,int4), OPERATOR 1 <(bigint,int4),
OPERATOR 2 <=(bigint,int4), OPERATOR 2 <=(bigint,int4),
OPERATOR 3 =(bigint,int4), OPERATOR 3 =(bigint,int4),
@ -921,7 +906,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'ALTER TABLE ... commands', catch_all => 'ALTER TABLE ... commands',
create_order => 93, create_order => 93,
create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col1 SET STATISTICS 90;', create_sql =>
'ALTER TABLE dump_test.test_table ALTER COLUMN col1 SET STATISTICS 90;',
regexp => qr/^ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col1 SET STATISTICS 90;\E\n \QALTER TABLE ONLY test_table ALTER COLUMN col1 SET STATISTICS 90;\E\n
/xm, /xm,
@ -954,7 +940,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'ALTER TABLE ... commands', catch_all => 'ALTER TABLE ... commands',
create_order => 94, create_order => 94,
create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;', create_sql =>
'ALTER TABLE dump_test.test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;',
regexp => qr/^ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;\E\n \QALTER TABLE ONLY test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;\E\n
/xm, /xm,
@ -987,7 +974,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'ALTER TABLE ... commands', catch_all => 'ALTER TABLE ... commands',
create_order => 95, create_order => 95,
create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col3 SET STORAGE MAIN;', create_sql =>
'ALTER TABLE dump_test.test_table ALTER COLUMN col3 SET STORAGE MAIN;',
regexp => qr/^ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col3 SET STORAGE MAIN;\E\n \QALTER TABLE ONLY test_table ALTER COLUMN col3 SET STORAGE MAIN;\E\n
/xm, /xm,
@ -1020,7 +1008,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'ALTER TABLE ... commands', catch_all => 'ALTER TABLE ... commands',
create_order => 95, create_order => 95,
create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col4 SET (n_distinct = 10);', create_sql =>
'ALTER TABLE dump_test.test_table ALTER COLUMN col4 SET (n_distinct = 10);',
regexp => qr/^ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col4 SET (n_distinct=10);\E\n \QALTER TABLE ONLY test_table ALTER COLUMN col4 SET (n_distinct=10);\E\n
/xm, /xm,
@ -1049,14 +1038,14 @@ my %tests = (
section_post_data => 1, section_post_data => 1,
section_data => 1, }, }, section_data => 1, }, },
'ALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2' => { 'ALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2'
=> {
all_runs => 1, all_runs => 1,
regexp => qr/^ regexp => qr/^
\QALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2 \E \QALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2 \E
\QFOR VALUES FROM ('2006-02-01') TO ('2006-03-01');\E\n \QFOR VALUES FROM ('2006-02-01') TO ('2006-03-01');\E\n
/xm, /xm,
like => { like => { binary_upgrade => 1, },
binary_upgrade => 1, },
unlike => { unlike => {
clean => 1, clean => 1,
clean_if_exists => 1, clean_if_exists => 1,
@ -1087,7 +1076,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'ALTER TABLE ... commands', catch_all => 'ALTER TABLE ... commands',
create_order => 96, create_order => 96,
create_sql => 'ALTER TABLE dump_test.test_table CLUSTER ON test_table_pkey', create_sql =>
'ALTER TABLE dump_test.test_table CLUSTER ON test_table_pkey',
regexp => qr/^ regexp => qr/^
\QALTER TABLE test_table CLUSTER ON test_table_pkey;\E\n \QALTER TABLE test_table CLUSTER ON test_table_pkey;\E\n
/xm, /xm,
@ -1380,7 +1370,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => catch_all =>
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)', 'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
regexp => qr/^ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO .*;/m, regexp =>
qr/^ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO .*;/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1407,7 +1398,8 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => catch_all =>
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)', 'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
regexp => qr/^ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO .*;/m, regexp =>
qr/^ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO .*;/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1433,7 +1425,8 @@ my %tests = (
# catch-all for ALTER ... OWNER (except LARGE OBJECTs and PUBLICATIONs) # catch-all for ALTER ... OWNER (except LARGE OBJECTs and PUBLICATIONs)
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)' => { 'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)' => {
all_runs => 0, # catch-all all_runs => 0, # catch-all
regexp => qr/^ALTER (?!LARGE OBJECT|PUBLICATION|SUBSCRIPTION)(.*) OWNER TO .*;/m, regexp =>
qr/^ALTER (?!LARGE OBJECT|PUBLICATION|SUBSCRIPTION)(.*) OWNER TO .*;/m,
like => {}, # use more-specific options above like => {}, # use more-specific options above
unlike => { unlike => {
column_inserts => 1, column_inserts => 1,
@ -1757,7 +1750,8 @@ my %tests = (
create_order => 79, create_order => 79,
create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion
IS \'comment on test conversion\';', IS \'comment on test conversion\';',
regexp => qr/^COMMENT ON CONVERSION test_conversion IS 'comment on test conversion';/m, regexp =>
qr/^COMMENT ON CONVERSION test_conversion IS 'comment on test conversion';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1789,7 +1783,8 @@ my %tests = (
create_order => 77, create_order => 77,
create_sql => 'COMMENT ON COLLATION test0 create_sql => 'COMMENT ON COLLATION test0
IS \'comment on test0 collation\';', IS \'comment on test0 collation\';',
regexp => qr/^COMMENT ON COLLATION test0 IS 'comment on test0 collation';/m, regexp =>
qr/^COMMENT ON COLLATION test0 IS 'comment on test0 collation';/m,
collation => 1, collation => 1,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
@ -1862,7 +1857,8 @@ my %tests = (
create_order => 55, create_order => 55,
create_sql => 'COMMENT ON PUBLICATION pub1 create_sql => 'COMMENT ON PUBLICATION pub1
IS \'comment on publication\';', IS \'comment on publication\';',
regexp => qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m, regexp =>
qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1896,7 +1892,8 @@ my %tests = (
create_order => 55, create_order => 55,
create_sql => 'COMMENT ON SUBSCRIPTION sub1 create_sql => 'COMMENT ON SUBSCRIPTION sub1
IS \'comment on subscription\';', IS \'comment on subscription\';',
regexp => qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m, regexp =>
qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1929,9 +1926,11 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'COMMENT commands', catch_all => 'COMMENT commands',
create_order => 84, create_order => 84,
create_sql => 'COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 create_sql =>
'COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1
IS \'comment on text search configuration\';', IS \'comment on text search configuration\';',
regexp => qr/^COMMENT ON TEXT SEARCH CONFIGURATION alt_ts_conf1 IS 'comment on text search configuration';/m, regexp =>
qr/^COMMENT ON TEXT SEARCH CONFIGURATION alt_ts_conf1 IS 'comment on text search configuration';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1961,9 +1960,11 @@ my %tests = (
all_runs => 1, all_runs => 1,
catch_all => 'COMMENT commands', catch_all => 'COMMENT commands',
create_order => 84, create_order => 84,
create_sql => 'COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 create_sql =>
'COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1
IS \'comment on text search dictionary\';', IS \'comment on text search dictionary\';',
regexp => qr/^COMMENT ON TEXT SEARCH DICTIONARY alt_ts_dict1 IS 'comment on text search dictionary';/m, regexp =>
qr/^COMMENT ON TEXT SEARCH DICTIONARY alt_ts_dict1 IS 'comment on text search dictionary';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -1995,7 +1996,8 @@ my %tests = (
create_order => 84, create_order => 84,
create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1
IS \'comment on text search parser\';', IS \'comment on text search parser\';',
regexp => qr/^COMMENT ON TEXT SEARCH PARSER alt_ts_prs1 IS 'comment on text search parser';/m, regexp =>
qr/^COMMENT ON TEXT SEARCH PARSER alt_ts_prs1 IS 'comment on text search parser';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -2027,7 +2029,8 @@ my %tests = (
create_order => 84, create_order => 84,
create_sql => 'COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 create_sql => 'COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1
IS \'comment on text search template\';', IS \'comment on text search template\';',
regexp => qr/^COMMENT ON TEXT SEARCH TEMPLATE alt_ts_temp1 IS 'comment on text search template';/m, regexp =>
qr/^COMMENT ON TEXT SEARCH TEMPLATE alt_ts_temp1 IS 'comment on text search template';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -2155,7 +2158,8 @@ my %tests = (
create_order => 71, create_order => 71,
create_sql => 'COMMENT ON TYPE dump_test.undefined create_sql => 'COMMENT ON TYPE dump_test.undefined
IS \'comment on undefined type\';', IS \'comment on undefined type\';',
regexp => qr/^COMMENT ON TYPE undefined IS 'comment on undefined type';/m, regexp =>
qr/^COMMENT ON TYPE undefined IS 'comment on undefined type';/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -2617,10 +2621,8 @@ qr/^\QINSERT INTO test_table_identity (col1, col2) OVERRIDING SYSTEM VALUE VALUE
all_runs => 1, all_runs => 1,
catch_all => 'CREATE ... commands', catch_all => 'CREATE ... commands',
create_order => 76, create_order => 76,
create_sql => create_sql => 'CREATE COLLATION test0 FROM "C";',
'CREATE COLLATION test0 FROM "C";', regexp => qr/^
regexp =>
qr/^
\QCREATE COLLATION test0 (provider = libc, locale = 'C');\E/xm, \QCREATE COLLATION test0 (provider = libc, locale = 'C');\E/xm,
collation => 1, collation => 1,
like => { like => {
@ -2828,8 +2830,10 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE CONVERSION dump_test.test_conversion' => { 'CREATE CONVERSION dump_test.test_conversion' => {
all_runs => 1, all_runs => 1,
create_order => 78, create_order => 78,
create_sql => 'CREATE DEFAULT CONVERSION dump_test.test_conversion FOR \'LATIN1\' TO \'UTF8\' FROM iso8859_1_to_utf8;', create_sql =>
regexp => qr/^\QCREATE DEFAULT CONVERSION test_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;\E/xm, 'CREATE DEFAULT CONVERSION dump_test.test_conversion FOR \'LATIN1\' TO \'UTF8\' FROM iso8859_1_to_utf8;',
regexp =>
qr/^\QCREATE DEFAULT CONVERSION test_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;\E/xm,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -3025,7 +3029,8 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE OPERATOR FAMILY dump_test.op_family' => { 'CREATE OPERATOR FAMILY dump_test.op_family' => {
all_runs => 1, all_runs => 1,
create_order => 73, create_order => 73,
create_sql => 'CREATE OPERATOR FAMILY dump_test.op_family USING btree;', create_sql =>
'CREATE OPERATOR FAMILY dump_test.op_family USING btree;',
regexp => qr/^ regexp => qr/^
\QCREATE OPERATOR FAMILY op_family USING btree;\E \QCREATE OPERATOR FAMILY op_family USING btree;\E
/xm, /xm,
@ -3340,7 +3345,8 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => { 'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => {
all_runs => 1, all_runs => 1,
create_order => 80, create_order => 80,
create_sql => 'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 (copy=english);', create_sql =>
'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 (copy=english);',
regexp => qr/^ regexp => qr/^
\QCREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (\E\n \QCREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (\E\n
\s+\QPARSER = pg_catalog."default" );\E/xm, \s+\QPARSER = pg_catalog."default" );\E/xm,
@ -3464,7 +3470,8 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => { 'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => {
all_runs => 1, all_runs => 1,
create_order => 81, create_order => 81,
create_sql => 'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 (lexize=dsimple_lexize);', create_sql =>
'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 (lexize=dsimple_lexize);',
regexp => qr/^ regexp => qr/^
\QCREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (\E\n \QCREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (\E\n
\s+\QLEXIZE = dsimple_lexize );\E/xm, \s+\QLEXIZE = dsimple_lexize );\E/xm,
@ -3539,7 +3546,8 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => { 'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => {
all_runs => 1, all_runs => 1,
create_order => 83, create_order => 83,
create_sql => 'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 (template=simple);', create_sql =>
'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 (template=simple);',
regexp => qr/^ regexp => qr/^
\QCREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (\E\n \QCREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (\E\n
\s+\QTEMPLATE = pg_catalog.simple );\E\n \s+\QTEMPLATE = pg_catalog.simple );\E\n
@ -3841,7 +3849,8 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => { 'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => {
all_runs => 1, all_runs => 1,
create_order => 88, create_order => 88,
create_sql => 'CREATE FOREIGN TABLE dump_test.foreign_table (c1 int options (column_name \'col1\')) create_sql =>
'CREATE FOREIGN TABLE dump_test.foreign_table (c1 int options (column_name \'col1\'))
SERVER s1 OPTIONS (schema_name \'x1\');', SERVER s1 OPTIONS (schema_name \'x1\');',
regexp => qr/ regexp => qr/
\QCREATE FOREIGN TABLE foreign_table (\E\n \QCREATE FOREIGN TABLE foreign_table (\E\n
@ -3883,8 +3892,10 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog
'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1' => { 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1' => {
all_runs => 1, all_runs => 1,
create_order => 86, create_order => 86,
create_sql => 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;', create_sql =>
regexp => qr/CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;/m, 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;',
regexp =>
qr/CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;/m,
like => { like => {
binary_upgrade => 1, binary_upgrade => 1,
clean => 1, clean => 1,
@ -4700,7 +4711,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'CREATE ... commands', catch_all => 'CREATE ... commands',
create_order => 11, create_order => 11,
create_sql => 'CREATE UNLOGGED TABLE dump_test_second_schema.test_third_table ( create_sql =>
'CREATE UNLOGGED TABLE dump_test_second_schema.test_third_table (
col1 serial col1 serial
) WITH OIDS;', ) WITH OIDS;',
regexp => qr/^ regexp => qr/^
@ -4788,7 +4800,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'CREATE ... commands', catch_all => 'CREATE ... commands',
create_order => 91, create_order => 91,
create_sql => 'CREATE TABLE dump_test_second_schema.measurement_y2006m2 create_sql =>
'CREATE TABLE dump_test_second_schema.measurement_y2006m2
PARTITION OF dump_test.measurement FOR VALUES PARTITION OF dump_test.measurement FOR VALUES
FROM (\'2006-02-01\') TO (\'2006-03-01\');', FROM (\'2006-02-01\') TO (\'2006-03-01\');',
regexp => qr/^ regexp => qr/^
@ -5168,7 +5181,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'CREATE ... commands', catch_all => 'CREATE ... commands',
create_order => 62, create_order => 62,
create_sql => 'ALTER VIEW dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;', create_sql =>
'ALTER VIEW dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;',
regexp => qr/^ regexp => qr/^
\QALTER TABLE ONLY test_view ALTER COLUMN col1 SET DEFAULT 1;\E/xm, \QALTER TABLE ONLY test_view ALTER COLUMN col1 SET DEFAULT 1;\E/xm,
like => { like => {
@ -5516,7 +5530,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'GRANT commands', catch_all => 'GRANT commands',
create_order => 72, create_order => 72,
create_sql => 'GRANT USAGE ON DOMAIN dump_test.us_postal_code TO regress_dump_test_role;', create_sql =>
'GRANT USAGE ON DOMAIN dump_test.us_postal_code TO regress_dump_test_role;',
regexp => qr/^ regexp => qr/^
\QGRANT ALL ON TYPE us_postal_code TO regress_dump_test_role;\E \QGRANT ALL ON TYPE us_postal_code TO regress_dump_test_role;\E
/xm, /xm,
@ -5552,7 +5567,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'GRANT commands', catch_all => 'GRANT commands',
create_order => 87, create_order => 87,
create_sql => 'GRANT USAGE ON TYPE dump_test.int42 TO regress_dump_test_role;', create_sql =>
'GRANT USAGE ON TYPE dump_test.int42 TO regress_dump_test_role;',
regexp => qr/^ regexp => qr/^
\QGRANT ALL ON TYPE int42 TO regress_dump_test_role;\E \QGRANT ALL ON TYPE int42 TO regress_dump_test_role;\E
/xm, /xm,
@ -5588,7 +5604,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'GRANT commands', catch_all => 'GRANT commands',
create_order => 66, create_order => 66,
create_sql => 'GRANT USAGE ON TYPE dump_test.planets TO regress_dump_test_role;', create_sql =>
'GRANT USAGE ON TYPE dump_test.planets TO regress_dump_test_role;',
regexp => qr/^ regexp => qr/^
\QGRANT ALL ON TYPE planets TO regress_dump_test_role;\E \QGRANT ALL ON TYPE planets TO regress_dump_test_role;\E
/xm, /xm,
@ -5624,7 +5641,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
all_runs => 1, all_runs => 1,
catch_all => 'GRANT commands', catch_all => 'GRANT commands',
create_order => 67, create_order => 67,
create_sql => 'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;', create_sql =>
'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;',
regexp => qr/^ regexp => qr/^
\QGRANT ALL ON TYPE textrange TO regress_dump_test_role;\E \QGRANT ALL ON TYPE textrange TO regress_dump_test_role;\E
/xm, /xm,
@ -6404,7 +6422,11 @@ my $port = $node->port;
# If it doesn't then we will skip all the COLLATION-related tests. # If it doesn't then we will skip all the COLLATION-related tests.
my $collation_support = 0; my $collation_support = 0;
my $collation_check_stderr; my $collation_check_stderr;
$node->psql('postgres',"CREATE COLLATION testing FROM \"C\"; DROP COLLATION testing;", on_error_stop => 0, stderr => \$collation_check_stderr); $node->psql(
'postgres',
"CREATE COLLATION testing FROM \"C\"; DROP COLLATION testing;",
on_error_stop => 0,
stderr => \$collation_check_stderr);
if ($collation_check_stderr !~ /ERROR: /) if ($collation_check_stderr !~ /ERROR: /)
{ {
@ -6436,8 +6458,10 @@ foreach my $run (sort keys %pgdump_runs)
# Then count all the tests run against each run # Then count all the tests run against each run
foreach my $test (sort keys %tests) foreach my $test (sort keys %tests)
{ {
# Skip any collation-related commands if there is no collation support # Skip any collation-related commands if there is no collation support
if (!$collation_support && defined($tests{$test}->{collation})) { if (!$collation_support && defined($tests{$test}->{collation}))
{
next; next;
} }
@ -6507,8 +6531,10 @@ foreach my $test (
{ {
if ($tests{$test}->{create_sql}) if ($tests{$test}->{create_sql})
{ {
# Skip any collation-related commands if there is no collation support # Skip any collation-related commands if there is no collation support
if (!$collation_support && defined($tests{$test}->{collation})) { if (!$collation_support && defined($tests{$test}->{collation}))
{
next; next;
} }
@ -6590,8 +6616,10 @@ foreach my $run (sort keys %pgdump_runs)
foreach my $test (sort keys %tests) foreach my $test (sort keys %tests)
{ {
# Skip any collation-related commands if there is no collation support # Skip any collation-related commands if there is no collation support
if (!$collation_support && defined($tests{$test}->{collation})) { if (!$collation_support && defined($tests{$test}->{collation}))
{
next; next;
} }
View File
@ -91,8 +91,8 @@ $node->safe_psql($dbname1, 'CREATE TABLE t0()');
# XXX no printed message when this fails, just SIGPIPE termination # XXX no printed message when this fails, just SIGPIPE termination
$node->command_ok( $node->command_ok(
[ 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, [ 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, '-U', $dbname1,
'-U', $dbname1, $node->connstr($dbname1) ], $node->connstr($dbname1) ],
'parallel dump'); 'parallel dump');
# recreate $dbname1 for restore test # recreate $dbname1 for restore test
View File
@ -45,7 +45,9 @@ $node->issues_sql_like(
$node->command_ok([qw(reindexdb --echo --table=pg_am dbname=template1)], $node->command_ok([qw(reindexdb --echo --table=pg_am dbname=template1)],
'reindexdb table with connection string'); 'reindexdb table with connection string');
$node->command_ok([qw(reindexdb --echo dbname=template1)], $node->command_ok(
[qw(reindexdb --echo dbname=template1)],
'reindexdb database with connection string'); 'reindexdb database with connection string');
$node->command_ok([qw(reindexdb --echo --system dbname=template1)], $node->command_ok(
[qw(reindexdb --echo --system dbname=template1)],
'reindexdb system with connection string'); 'reindexdb system with connection string');
View File
@ -17,7 +17,8 @@ $ENV{PGCLIENTENCODING} = 'LATIN1';
# Create database names covering the range of LATIN1 characters and # Create database names covering the range of LATIN1 characters and
# run the utilities' --all options over them. # run the utilities' --all options over them.
my $dbname1 = generate_ascii_string(1, 63); # contains '=' my $dbname1 = generate_ascii_string(1, 63); # contains '='
my $dbname2 = generate_ascii_string(67, 129); # skip 64-66 to keep length to 62 my $dbname2 =
generate_ascii_string(67, 129); # skip 64-66 to keep length to 62
my $dbname3 = generate_ascii_string(130, 192); my $dbname3 = generate_ascii_string(130, 192);
my $dbname4 = generate_ascii_string(193, 255); my $dbname4 = generate_ascii_string(193, 255);
@ -30,9 +31,11 @@ foreach my $dbname ($dbname1, $dbname2, $dbname3, $dbname4, 'CamelCase')
$node->run_log([ 'createdb', $dbname ]); $node->run_log([ 'createdb', $dbname ]);
} }
$node->command_ok([qw(vacuumdb --all --echo --analyze-only)], $node->command_ok(
[qw(vacuumdb --all --echo --analyze-only)],
'vacuumdb --all with unusual database names'); 'vacuumdb --all with unusual database names');
$node->command_ok([qw(reindexdb --all --echo)], $node->command_ok([qw(reindexdb --all --echo)],
'reindexdb --all with unusual database names'); 'reindexdb --all with unusual database names');
$node->command_ok([qw(clusterdb --all --echo --verbose)], $node->command_ok(
[qw(clusterdb --all --echo --verbose)],
'clusterdb --all with unusual database names'); 'clusterdb --all with unusual database names');
View File
@ -32,9 +32,11 @@ close $FH;
# and character decomposition mapping # and character decomposition mapping
my @characters = (); my @characters = ();
my %character_hash = (); my %character_hash = ();
open($FH, '<', "UnicodeData.txt") or die "Could not open UnicodeData.txt: $!."; open($FH, '<', "UnicodeData.txt")
or die "Could not open UnicodeData.txt: $!.";
while (my $line = <$FH>) while (my $line = <$FH>)
{ {
# Split the line wanted and get the fields needed: # Split the line wanted and get the fields needed:
# - Unicode code value # - Unicode code value
# - Canonical Combining Class # - Canonical Combining Class
@ -141,6 +143,7 @@ foreach my $char (@characters)
if ($decomp_size == 2) if ($decomp_size == 2)
{ {
# Should this be used for recomposition? # Should this be used for recomposition?
if ($compat) if ($compat)
{ {
@ -173,6 +176,7 @@ foreach my $char (@characters)
} }
elsif ($decomp_size == 1 && length($first_decomp) <= 4) elsif ($decomp_size == 1 && length($first_decomp) <= 4)
{ {
# The decomposition consists of a single codepoint, and it fits # The decomposition consists of a single codepoint, and it fits
# in a uint16, so we can store it "inline" in the main table. # in a uint16, so we can store it "inline" in the main table.
$flags .= " | DECOMP_INLINE"; $flags .= " | DECOMP_INLINE";
@ -201,6 +205,7 @@ foreach my $char (@characters)
print $OUTPUT "," unless ($code eq $last_code); print $OUTPUT "," unless ($code eq $last_code);
if ($comment ne "") if ($comment ne "")
{ {
# If the line is wide already, indent the comment with one tab, # If the line is wide already, indent the comment with one tab,
# otherwise with two. This is to make the output match the way # otherwise with two. This is to make the output match the way
# pgindent would mangle it. (This is quite hacky. To do this # pgindent would mangle it. (This is quite hacky. To do this
View File
@ -35,7 +35,8 @@ while (<$regress_in_fh>)
} }
# restore STDOUT/ERR so we can print the outcome to the user # restore STDOUT/ERR so we can print the outcome to the user
open(STDERR, ">&", $olderr_fh) or die; # can't complain as STDERR is still duped open(STDERR, ">&", $olderr_fh)
or die; # can't complain as STDERR is still duped
open(STDOUT, ">&", $oldout_fh) or die "can't restore STDOUT: $!"; open(STDOUT, ">&", $oldout_fh) or die "can't restore STDOUT: $!";
# just in case # just in case
View File
@ -52,7 +52,8 @@ sub ::encode_array_constructor
{ {
package PostgreSQL::InServer; ## no critic (RequireFilenameMatchesPackage); package PostgreSQL::InServer
; ## no critic (RequireFilenameMatchesPackage);
use strict; use strict;
use warnings; use warnings;
View File
@ -1,6 +1,7 @@
# src/pl/plperl/plc_trusted.pl # src/pl/plperl/plc_trusted.pl
package PostgreSQL::InServer::safe; ## no critic (RequireFilenameMatchesPackage); package PostgreSQL::InServer::safe
; ## no critic (RequireFilenameMatchesPackage);
# Load widely useful pragmas into plperl to make them available. # Load widely useful pragmas into plperl to make them available.
# #
View File
@ -44,7 +44,8 @@ sub test_role
$status_string = 'success' if ($expected_res eq 0); $status_string = 'success' if ($expected_res eq 0);
my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]); my $res =
$node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res, is($res, $expected_res,
"authentication $status_string for method $method, role $role"); "authentication $status_string for method $method, role $role");
} }
@ -56,8 +57,11 @@ $node->start;
# Create 3 roles with different password methods for each one. The same # Create 3 roles with different password methods for each one. The same
# password is used for all of them. # password is used for all of them.
$node->safe_psql('postgres', "SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';"); $node->safe_psql('postgres',
$node->safe_psql('postgres', "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';"); "SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';"
);
$node->safe_psql('postgres',
"SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
$ENV{"PGPASSWORD"} = 'pass'; $ENV{"PGPASSWORD"} = 'pass';
# For "trust" method, all users should be able to connect. # For "trust" method, all users should be able to connect.
View File
@ -41,9 +41,11 @@ sub test_login
$status_string = 'success' if ($expected_res eq 0); $status_string = 'success' if ($expected_res eq 0);
$ENV{"PGPASSWORD"} = $password; $ENV{"PGPASSWORD"} = $password;
my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]); my $res =
$node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res, is($res, $expected_res,
"authentication $status_string for role $role with password $password"); "authentication $status_string for role $role with password $password"
);
} }
# Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII # Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII
@ -66,7 +68,8 @@ $node->start;
# 7 <U+0627><U+0031> Error - bidirectional check # 7 <U+0627><U+0031> Error - bidirectional check
# Create test roles. # Create test roles.
$node->safe_psql('postgres', $node->safe_psql(
'postgres',
"SET password_encryption='scram-sha-256'; "SET password_encryption='scram-sha-256';
SET client_encoding='utf8'; SET client_encoding='utf8';
CREATE ROLE saslpreptest1_role LOGIN PASSWORD 'IX'; CREATE ROLE saslpreptest1_role LOGIN PASSWORD 'IX';
View File
@ -44,8 +44,7 @@ is($master_ts, $standby_ts, "standby gives same value as master");
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off'); $master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart; $master->restart;
$master->safe_psql('postgres', 'checkpoint'); $master->safe_psql('postgres', 'checkpoint');
$master_lsn = $master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres', $standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "slave never caught up"; or die "slave never caught up";
View File
@ -41,12 +41,9 @@ my $tempdir_short = TestLib::tempdir_short;
my %pgdump_runs = ( my %pgdump_runs = (
binary_upgrade => { binary_upgrade => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/binary_upgrade.sql", '--schema-only',
"--file=$tempdir/binary_upgrade.sql", '--binary-upgrade', '--dbname=postgres', ], },
'--schema-only',
'--binary-upgrade',
'--dbname=postgres', ], },
clean => { clean => {
dump_cmd => [ dump_cmd => [
'pg_dump', "--file=$tempdir/clean.sql", 'pg_dump', "--file=$tempdir/clean.sql",
@ -63,12 +60,9 @@ my %pgdump_runs = (
'postgres', ], }, 'postgres', ], },
column_inserts => { column_inserts => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/column_inserts.sql", '-a',
"--file=$tempdir/column_inserts.sql", '--column-inserts', 'postgres', ], },
'-a',
'--column-inserts',
'postgres', ], },
createdb => { createdb => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump',
@ -126,52 +120,35 @@ my %pgdump_runs = (
"$tempdir/defaults_tar_format.tar", ], }, "$tempdir/defaults_tar_format.tar", ], },
pg_dumpall_globals => { pg_dumpall_globals => {
dump_cmd => [ dump_cmd => [
'pg_dumpall', 'pg_dumpall', '--no-sync',
'--no-sync', "--file=$tempdir/pg_dumpall_globals.sql", '-g', ], },
"--file=$tempdir/pg_dumpall_globals.sql",
'-g', ],
},
no_privs => { no_privs => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/no_privs.sql", '-x',
"--file=$tempdir/no_privs.sql",
'-x',
'postgres', ], }, 'postgres', ], },
no_owner => { no_owner => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/no_owner.sql", '-O',
"--file=$tempdir/no_owner.sql",
'-O',
'postgres', ], }, 'postgres', ], },
schema_only => { schema_only => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql",
'--no-sync', '-s', 'postgres', ], },
"--file=$tempdir/schema_only.sql",
'-s',
'postgres', ],
},
section_pre_data => { section_pre_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/section_pre_data.sql", '--section=pre-data',
"--file=$tempdir/section_pre_data.sql",
'--section=pre-data',
'postgres', ], }, 'postgres', ], },
section_data => { section_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync',
'--no-sync', "--file=$tempdir/section_data.sql", '--section=data',
"--file=$tempdir/section_data.sql",
'--section=data',
'postgres', ], }, 'postgres', ], },
section_post_data => { section_post_data => {
dump_cmd => [ dump_cmd => [
'pg_dump', 'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql",
'--no-sync',
"--file=$tempdir/section_post_data.sql",
'--section=post-data', 'postgres', ], },); '--section=post-data', 'postgres', ], },);
############################################################### ###############################################################
@ -492,9 +469,8 @@ my %tests = (
pg_dumpall_globals => 1, pg_dumpall_globals => 1,
section_post_data => 1, }, }, section_post_data => 1, }, },
'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' =>
=> { { create_order => 4,
create_order => 4,
create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
TO regress_dump_test_role;', TO regress_dump_test_role;',
regexp => qr/^ regexp => qr/^
View File
@ -846,6 +846,7 @@ sub _update_pid
$self->{_pid} = undef; $self->{_pid} = undef;
print "# No postmaster PID for node \"$name\"\n"; print "# No postmaster PID for node \"$name\"\n";
# Complain if we expected to find a pidfile. # Complain if we expected to find a pidfile.
BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running; BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running;
} }
@ -1140,10 +1141,12 @@ sub psql
my $exc_save = $@; my $exc_save = $@;
if ($exc_save) if ($exc_save)
{ {
# IPC::Run::run threw an exception. re-throw unless it's a # IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired # timeout, which we'll handle by testing is_expired
die $exc_save die $exc_save
if (blessed($exc_save) || $exc_save !~ /^\Q$timeout_exception\E/); if (blessed($exc_save)
|| $exc_save !~ /^\Q$timeout_exception\E/);
$ret = undef; $ret = undef;
@ -1191,7 +1194,8 @@ sub psql
if $ret == 1; if $ret == 1;
die "connection error: '$$stderr'\nwhile running '@psql_params'" die "connection error: '$$stderr'\nwhile running '@psql_params'"
if $ret == 2; if $ret == 2;
die "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'" die
"error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
if $ret == 3; if $ret == 3;
die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'"; die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
} }
@ -1362,14 +1366,16 @@ mode must be specified.
sub lsn sub lsn
{ {
my ($self, $mode) = @_; my ($self, $mode) = @_;
my %modes = ('insert' => 'pg_current_wal_insert_lsn()', my %modes = (
'insert' => 'pg_current_wal_insert_lsn()',
'flush' => 'pg_current_wal_flush_lsn()', 'flush' => 'pg_current_wal_flush_lsn()',
'write' => 'pg_current_wal_lsn()', 'write' => 'pg_current_wal_lsn()',
'receive' => 'pg_last_wal_receive_lsn()', 'receive' => 'pg_last_wal_receive_lsn()',
'replay' => 'pg_last_wal_replay_lsn()'); 'replay' => 'pg_last_wal_replay_lsn()');
$mode = '<undef>' if !defined($mode); $mode = '<undef>' if !defined($mode);
die "unknown mode for 'lsn': '$mode', valid modes are " . join(', ', keys %modes) die "unknown mode for 'lsn': '$mode', valid modes are "
. join(', ', keys %modes)
if !defined($modes{$mode}); if !defined($modes{$mode});
my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}"); my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
@ -1409,18 +1415,29 @@ sub wait_for_catchup
{ {
my ($self, $standby_name, $mode, $target_lsn) = @_; my ($self, $standby_name, $mode, $target_lsn) = @_;
$mode = defined($mode) ? $mode : 'replay'; $mode = defined($mode) ? $mode : 'replay';
my %valid_modes = ( 'sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1 ); my %valid_modes =
die "unknown mode $mode for 'wait_for_catchup', valid modes are " . join(', ', keys(%valid_modes)) unless exists($valid_modes{$mode}); ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
die "unknown mode $mode for 'wait_for_catchup', valid modes are "
. join(', ', keys(%valid_modes))
unless exists($valid_modes{$mode});
# Allow passing of a PostgresNode instance as shorthand # Allow passing of a PostgresNode instance as shorthand
if (blessed($standby_name) && $standby_name->isa("PostgresNode")) if (blessed($standby_name) && $standby_name->isa("PostgresNode"))
{ {
$standby_name = $standby_name->name; $standby_name = $standby_name->name;
} }
die 'target_lsn must be specified' unless defined($target_lsn); die 'target_lsn must be specified' unless defined($target_lsn);
print "Waiting for replication conn " . $standby_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n"; print "Waiting for replication conn "
my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';]; . $standby_name . "'s "
. $mode
. "_lsn to pass "
. $target_lsn . " on "
. $self->name . "\n";
my $query =
qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
$self->poll_query_until('postgres', $query) $self->poll_query_until('postgres', $query)
or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)'); or die "timed out waiting for catchup, current location is "
. ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n"; print "done\n";
} }
@ -1453,10 +1470,17 @@ sub wait_for_slot_catchup
die "valid modes are restart, confirmed_flush"; die "valid modes are restart, confirmed_flush";
} }
die 'target lsn must be specified' unless defined($target_lsn); die 'target lsn must be specified' unless defined($target_lsn);
print "Waiting for replication slot " . $slot_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n"; print "Waiting for replication slot "
my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';]; . $slot_name . "'s "
. $mode
. "_lsn to pass "
. $target_lsn . " on "
. $self->name . "\n";
my $query =
qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
$self->poll_query_until('postgres', $query) $self->poll_query_until('postgres', $query)
or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)'); or die "timed out waiting for catchup, current location is "
. ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n"; print "done\n";
} }
@ -1485,18 +1509,23 @@ null columns.
sub query_hash sub query_hash
{ {
my ($self, $dbname, $query, @columns) = @_; my ($self, $dbname, $query, @columns) = @_;
die 'calls in array context for multi-row results not supported yet' if (wantarray); die 'calls in array context for multi-row results not supported yet'
if (wantarray);
# Replace __COLUMNS__ if found # Replace __COLUMNS__ if found
substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) = join(', ', @columns) substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
join(', ', @columns)
if index($query, '__COLUMNS__') >= 0; if index($query, '__COLUMNS__') >= 0;
my $result = $self->safe_psql($dbname, $query); my $result = $self->safe_psql($dbname, $query);
# hash slice, see http://stackoverflow.com/a/16755894/398670 . # hash slice, see http://stackoverflow.com/a/16755894/398670 .
# #
# Fills the hash with empty strings produced by x-operator element # Fills the hash with empty strings produced by x-operator element
# duplication if result is an empty row # duplication if result is an empty row
# #
my %val; my %val;
@val{@columns} = $result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns); @val{@columns} =
$result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
return \%val; return \%val;
} }
@ -1518,8 +1547,14 @@ either.
sub slot sub slot
{ {
my ($self, $slot_name) = @_; my ($self, $slot_name) = @_;
my @columns = ('plugin', 'slot_type', 'datoid', 'database', 'active', 'active_pid', 'xmin', 'catalog_xmin', 'restart_lsn'); my @columns = (
return $self->query_hash('postgres', "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'", @columns); 'plugin', 'slot_type', 'datoid', 'database',
'active', 'active_pid', 'xmin', 'catalog_xmin',
'restart_lsn');
return $self->query_hash(
'postgres',
"SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
@columns);
} }
=pod =pod
@ -1543,7 +1578,8 @@ to check for timeout. retval is undef on timeout.
sub pg_recvlogical_upto sub pg_recvlogical_upto
{ {
my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) = @_; my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) =
@_;
my ($stdout, $stderr); my ($stdout, $stderr);
my $timeout_exception = 'pg_recvlogical timed out'; my $timeout_exception = 'pg_recvlogical timed out';
@ -1551,21 +1587,27 @@ sub pg_recvlogical_upto
die 'slot name must be specified' unless defined($slot_name); die 'slot name must be specified' unless defined($slot_name);
die 'endpos must be specified' unless defined($endpos); die 'endpos must be specified' unless defined($endpos);
my @cmd = ('pg_recvlogical', '-S', $slot_name, '--dbname', $self->connstr($dbname)); my @cmd = (
'pg_recvlogical', '-S', $slot_name, '--dbname',
$self->connstr($dbname));
push @cmd, '--endpos', $endpos; push @cmd, '--endpos', $endpos;
push @cmd, '-f', '-', '--no-loop', '--start'; push @cmd, '-f', '-', '--no-loop', '--start';
while (my ($k, $v) = each %plugin_options) while (my ($k, $v) = each %plugin_options)
{ {
die "= is not permitted to appear in replication option name" if ($k =~ qr/=/); die "= is not permitted to appear in replication option name"
if ($k =~ qr/=/);
push @cmd, "-o", "$k=$v"; push @cmd, "-o", "$k=$v";
} }
my $timeout; my $timeout;
$timeout = IPC::Run::timeout($timeout_secs, exception => $timeout_exception ) if $timeout_secs; $timeout =
IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
if $timeout_secs;
my $ret = 0; my $ret = 0;
do { do
{
local $@; local $@;
eval { eval {
IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout); IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
@ -1574,6 +1616,7 @@ sub pg_recvlogical_upto
my $exc_save = $@; my $exc_save = $@;
if ($exc_save) if ($exc_save)
{ {
# IPC::Run::run threw an exception. re-throw unless it's a # IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired # timeout, which we'll handle by testing is_expired
die $exc_save die $exc_save
@ -1584,7 +1627,8 @@ sub pg_recvlogical_upto
die "Got timeout exception '$exc_save' but timer not expired?!" die "Got timeout exception '$exc_save' but timer not expired?!"
unless $timeout->is_expired; unless $timeout->is_expired;
die "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'" die
"$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
unless wantarray; unless wantarray;
} }
}; };
@ -1598,7 +1642,9 @@ sub pg_recvlogical_upto
} }
else else
{ {
die "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'" if $ret; die
"pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
if $ret;
return $stdout; return $stdout;
} }
} }
View File
@ -17,6 +17,7 @@ use File::Spec;
use File::Temp (); use File::Temp ();
use IPC::Run; use IPC::Run;
use SimpleTee; use SimpleTee;
# specify a recent enough version of Test::More to support the note() function # specify a recent enough version of Test::More to support the note() function
use Test::More 0.82; use Test::More 0.82;
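As a small aside, note() is the reason for the 0.82 floor here: it was added to Test::More in that release and emits a TAP diagnostic line that harnesses echo without affecting the test plan. A one-line illustration, assuming any TAP script that already uses Test::More 0.82 or later:

note "switching to physical replication slot";    # shows up as '# switching to physical replication slot'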
@ -40,8 +40,10 @@ $node_master->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a"); "CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a");
# Wait for standbys to catch up # Wait for standbys to catch up
$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert')); $node_master->wait_for_catchup($node_standby_1, 'replay',
$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay')); $node_master->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('replay'));
my $result = my $result =
$node_standby_1->safe_psql('postgres', "SELECT count(*) FROM tab_int"); $node_standby_1->safe_psql('postgres', "SELECT count(*) FROM tab_int");
@ -89,25 +91,32 @@ sub test_target_session_attrs
# The client used for the connection does not matter, only the backend # The client used for the connection does not matter, only the backend
# point does. # point does.
my ($ret, $stdout, $stderr) = my ($ret, $stdout, $stderr) =
$node1->psql('postgres', 'SHOW port;', extra_params => ['-d', $connstr]); $node1->psql('postgres', 'SHOW port;',
is($status == $ret && $stdout eq $target_node->port, 1, extra_params => [ '-d', $connstr ]);
"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed"); is( $status == $ret && $stdout eq $target_node->port,
1,
"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed"
);
} }
# Connect to master in "read-write" mode with master,standby1 list. # Connect to master in "read-write" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master, test_target_session_attrs($node_master, $node_standby_1, $node_master,
"read-write", 0); "read-write", 0);
# Connect to master in "read-write" mode with standby1,master list. # Connect to master in "read-write" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_master, test_target_session_attrs($node_standby_1, $node_master, $node_master,
"read-write", 0); "read-write", 0);
# Connect to master in "any" mode with master,standby1 list. # Connect to master in "any" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master, test_target_session_attrs($node_master, $node_standby_1, $node_master, "any",
"any", 0); 0);
# Connect to standby1 in "any" mode with standby1,master list. # Connect to standby1 in "any" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_standby_1, test_target_session_attrs($node_standby_1, $node_master, $node_standby_1,
"any", 0); "any", 0);
note "switching to physical replication slot"; note "switching to physical replication slot";
# Switch to using a physical replication slot. We can do this without a new # Switch to using a physical replication slot. We can do this without a new
# backup since physical slots can go backwards if needed. Do so on both # backup since physical slots can go backwards if needed. Do so on both
# standbys. Since we're going to be testing things that affect the slot state, # standbys. Since we're going to be testing things that affect the slot state,
@ -115,14 +124,26 @@ note "switching to physical replication slot";
my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2'); my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2');
$node_master->append_conf('postgresql.conf', "max_replication_slots = 4"); $node_master->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_master->restart; $node_master->restart;
is($node_master->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_1');]), 0, 'physical slot created on master'); is( $node_master->psql(
$node_standby_1->append_conf('recovery.conf', "primary_slot_name = $slotname_1"); 'postgres',
$node_standby_1->append_conf('postgresql.conf', "wal_receiver_status_interval = 1"); qq[SELECT pg_create_physical_replication_slot('$slotname_1');]),
0,
'physical slot created on master');
$node_standby_1->append_conf('recovery.conf',
"primary_slot_name = $slotname_1");
$node_standby_1->append_conf('postgresql.conf',
"wal_receiver_status_interval = 1");
$node_standby_1->append_conf('postgresql.conf', "max_replication_slots = 4"); $node_standby_1->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_standby_1->restart; $node_standby_1->restart;
is($node_standby_1->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_2');]), 0, 'physical slot created on intermediate replica'); is( $node_standby_1->psql(
$node_standby_2->append_conf('recovery.conf', "primary_slot_name = $slotname_2"); 'postgres',
$node_standby_2->append_conf('postgresql.conf', "wal_receiver_status_interval = 1"); qq[SELECT pg_create_physical_replication_slot('$slotname_2');]),
0,
'physical slot created on intermediate replica');
$node_standby_2->append_conf('recovery.conf',
"primary_slot_name = $slotname_2");
$node_standby_2->append_conf('postgresql.conf',
"wal_receiver_status_interval = 1");
$node_standby_2->restart; $node_standby_2->restart;
sub get_slot_xmins sub get_slot_xmins
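The body of get_slot_xmins falls outside this hunk. Going by how the test consumes it below, a plausible sketch is a thin wrapper over the node's slot() accessor; this is an assumption about the helper, not the committed code:

sub get_slot_xmins
{
	my ($node, $slotname) = @_;
	my $slotinfo = $node->slot($slotname);
	return ($slotinfo->{'xmin'}, $slotinfo->{'catalog_xmin'});
}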
@ -147,23 +168,32 @@ $node_master->safe_psql('postgres', 'CREATE TABLE replayed(val integer);');
sub replay_check sub replay_check
{ {
my $newval = $node_master->safe_psql('postgres', 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'); my $newval = $node_master->safe_psql('postgres',
$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert')); 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay')); );
$node_standby_1->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval]) $node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('replay'));
$node_standby_1->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_1 didn't replay master value $newval"; or die "standby_1 didn't replay master value $newval";
$node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval]) $node_standby_2->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_2 didn't replay standby_1 value $newval"; or die "standby_2 didn't replay standby_1 value $newval";
} }
replay_check(); replay_check();
note "enabling hot_standby_feedback"; note "enabling hot_standby_feedback";
# Enable hs_feedback. The slot should gain an xmin. We set the status interval # Enable hs_feedback. The slot should gain an xmin. We set the status interval
# so we'll see the results promptly. # so we'll see the results promptly.
$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;'); $node_standby_1->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_1->reload; $node_standby_1->reload;
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;'); $node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload; $node_standby_2->reload;
replay_check(); replay_check();
sleep(2); sleep(2);
@ -177,7 +207,8 @@ isnt($xmin, '', 'cascaded slot xmin non-null with hs feedback');
is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback'); is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback');
note "doing some work to advance xmin"; note "doing some work to advance xmin";
for my $i (10000..11000) { for my $i (10000 .. 11000)
{
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES ($i);]); $node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES ($i);]);
} }
$node_master->safe_psql('postgres', 'VACUUM;'); $node_master->safe_psql('postgres', 'VACUUM;');
@ -186,38 +217,46 @@ $node_master->safe_psql('postgres', 'CHECKPOINT;');
my ($xmin2, $catalog_xmin2) = get_slot_xmins($node_master, $slotname_1); my ($xmin2, $catalog_xmin2) = get_slot_xmins($node_master, $slotname_1);
note "new xmin $xmin2, old xmin $xmin"; note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'non-cascaded slot xmin with hs feedback has changed'); isnt($xmin2, $xmin, 'non-cascaded slot xmin with hs feedback has changed');
is($catalog_xmin2, '', 'non-cascaded slot xmin still null with hs_feedback unchanged'); is($catalog_xmin2, '',
'non-cascaded slot xmin still null with hs_feedback unchanged');
($xmin2, $catalog_xmin2) = get_slot_xmins($node_standby_1, $slotname_2); ($xmin2, $catalog_xmin2) = get_slot_xmins($node_standby_1, $slotname_2);
note "new xmin $xmin2, old xmin $xmin"; note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'cascaded slot xmin with hs feedback has changed'); isnt($xmin2, $xmin, 'cascaded slot xmin with hs feedback has changed');
is($catalog_xmin2, '', 'cascaded slot xmin still null with hs_feedback unchanged'); is($catalog_xmin2, '',
'cascaded slot xmin still null with hs_feedback unchanged');
note "disabling hot_standby_feedback"; note "disabling hot_standby_feedback";
# Disable hs_feedback. Xmin should be cleared. # Disable hs_feedback. Xmin should be cleared.
$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;'); $node_standby_1->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_1->reload; $node_standby_1->reload;
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;'); $node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->reload; $node_standby_2->reload;
replay_check(); replay_check();
sleep(2); sleep(2);
($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1); ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
is($xmin, '', 'non-cascaded slot xmin null with hs feedback reset'); is($xmin, '', 'non-cascaded slot xmin null with hs feedback reset');
is($catalog_xmin, '', 'non-cascaded slot xmin still null with hs_feedback reset'); is($catalog_xmin, '',
'non-cascaded slot xmin still null with hs_feedback reset');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2); ($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
is($xmin, '', 'cascaded slot xmin null with hs feedback reset'); is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback reset'); is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback reset');
note "re-enabling hot_standby_feedback and disabling while stopped"; note "re-enabling hot_standby_feedback and disabling while stopped";
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;'); $node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload; $node_standby_2->reload;
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES (11000);]); $node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES (11000);]);
replay_check(); replay_check();
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;'); $node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->stop; $node_standby_2->stop;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2); ($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
@ -227,4 +266,5 @@ isnt($xmin, '', 'cascaded slot xmin non-null with postgres shut down');
$node_standby_2->start; $node_standby_2->start;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2); ($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
is($xmin, '', 'cascaded slot xmin reset after startup with hs feedback reset'); is($xmin, '',
'cascaded slot xmin reset after startup with hs feedback reset');
@ -22,8 +22,7 @@ sub test_recovery_standby
foreach my $param_item (@$recovery_params) foreach my $param_item (@$recovery_params)
{ {
$node_standby->append_conf( $node_standby->append_conf('recovery.conf', qq($param_item));
'recovery.conf', qq($param_item));
} }
$node_standby->start; $node_standby->start;
@ -71,8 +70,8 @@ my ($lsn2, $recovery_txid) = split /\|/, $ret;
# More data, with recovery target timestamp # More data, with recovery target timestamp
$node_master->safe_psql('postgres', $node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(2001,3000))"); "INSERT INTO tab_int VALUES (generate_series(2001,3000))");
$ret = $node_master->safe_psql('postgres', $ret =
"SELECT pg_current_wal_lsn(), now();"); $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn(), now();");
my ($lsn3, $recovery_time) = split /\|/, $ret; my ($lsn3, $recovery_time) = split /\|/, $ret;
# Even more data, this time with a recovery target name # Even more data, this time with a recovery target name
@ -87,7 +86,8 @@ $node_master->safe_psql('postgres',
# And now for a recovery target LSN # And now for a recovery target LSN
$node_master->safe_psql('postgres', $node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(4001,5000))"); "INSERT INTO tab_int VALUES (generate_series(4001,5000))");
my $recovery_lsn = $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); my $recovery_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $lsn5 = my $lsn5 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
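To make the data flow concrete: each captured value ($recovery_txid, $recovery_time, $recovery_lsn) later becomes a literal recovery.conf line that test_recovery_standby appends verbatim before starting its standby, as shown in the loop at the top of this file's diff. A hedged example of one such parameter list (recovery_target_lsn is new in PostgreSQL 10; the helper's full argument list is not shown in this hunk):

my @recovery_params = ("recovery_target_lsn = '$recovery_lsn'");
# or, equivalently, for a time-based target:
# my @recovery_params = ("recovery_target_time = '$recovery_time'");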
@ -34,7 +34,8 @@ $node_master->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
# Wait until standby has replayed enough data on standby 1 # Wait until standby has replayed enough data on standby 1
$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('write')); $node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('write'));
# Stop and remove master, and promote standby 1, switching it to a new timeline # Stop and remove master, and promote standby 1, switching it to a new timeline
$node_master->teardown_node; $node_master->teardown_node;
@ -55,7 +56,8 @@ $node_standby_2->restart;
# to ensure that the timeline switch has been done. # to ensure that the timeline switch has been done.
$node_standby_1->safe_psql('postgres', $node_standby_1->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))"); "INSERT INTO tab_int VALUES (generate_series(1001,2000))");
$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('write')); $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('write'));
my $result = my $result =
$node_standby_2->safe_psql('postgres', "SELECT count(*) FROM tab_int"); $node_standby_2->safe_psql('postgres', "SELECT count(*) FROM tab_int");
@ -50,8 +50,7 @@ while ($remaining-- > 0)
# Done waiting? # Done waiting?
my $replay_status = $node_standby->safe_psql('postgres', my $replay_status = $node_standby->safe_psql('postgres',
"SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0" "SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0");
);
last if $replay_status eq 't'; last if $replay_status eq 't';
# No, sleep some more. # No, sleep some more.
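The hand-rolled wait loop above could equally be written with the node's poll_query_until helper; a hedged equivalent, assuming the same $node_standby and $until_lsn variables, is:

$node_standby->poll_query_until('postgres',
	"SELECT pg_last_wal_replay_lsn() >= '$until_lsn'::pg_lsn")
  or die "standby never replayed up to $until_lsn";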
@ -20,15 +20,21 @@ wal_level = logical
$node_master->start; $node_master->start;
my $backup_name = 'master_backup'; my $backup_name = 'master_backup';
$node_master->safe_psql('postgres', qq[CREATE TABLE decoding_test(x integer, y text);]); $node_master->safe_psql('postgres',
qq[CREATE TABLE decoding_test(x integer, y text);]);
$node_master->safe_psql('postgres', qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]); $node_master->safe_psql('postgres',
qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]); $node_master->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]
);
# Basic decoding works # Basic decoding works
my($result) = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]); my ($result) = $node_master->safe_psql('postgres',
is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc BEGIN/COMMIT'); qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
is(scalar(my @foobar = split /^/m, $result),
12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
# If we immediately crash the server we might lose the progress we just made # If we immediately crash the server we might lose the progress we just made
# and replay the same changes again. But a clean shutdown should never repeat # and replay the same changes again. But a clean shutdown should never repeat
@ -36,13 +42,16 @@ is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc
$node_master->restart('fast'); $node_master->restart('fast');
# There are no new writes, so the result should be empty. # There are no new writes, so the result should be empty.
$result = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]); $result = $node_master->safe_psql('postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
chomp($result); chomp($result);
is($result, '', 'Decoding after fast restart repeats no rows'); is($result, '', 'Decoding after fast restart repeats no rows');
# Insert some rows and verify that we get the same results from pg_recvlogical # Insert some rows and verify that we get the same results from pg_recvlogical
# and the SQL interface. # and the SQL interface.
$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]); $node_master->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]
);
my $expected = q{BEGIN my $expected = q{BEGIN
table public.decoding_test: INSERT: x[integer]:1 y[text]:'1' table public.decoding_test: INSERT: x[integer]:1 y[text]:'1'
@ -51,59 +60,91 @@ table public.decoding_test: INSERT: x[integer]:3 y[text]:'3'
table public.decoding_test: INSERT: x[integer]:4 y[text]:'4' table public.decoding_test: INSERT: x[integer]:4 y[text]:'4'
COMMIT}; COMMIT};
my $stdout_sql = $node_master->safe_psql('postgres', qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]); my $stdout_sql = $node_master->safe_psql('postgres',
qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
);
is($stdout_sql, $expected, 'got expected output from SQL decoding session'); is($stdout_sql, $expected, 'got expected output from SQL decoding session');
my $endpos = $node_master->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"); my $endpos = $node_master->safe_psql('postgres',
"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);
print "waiting to replay $endpos\n"; print "waiting to replay $endpos\n";
my $stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1'); my $stdout_recv = $node_master->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 10,
'include-xids' => '0',
'skip-empty-xacts' => '1');
chomp($stdout_recv); chomp($stdout_recv);
is($stdout_recv, $expected, 'got same expected output from pg_recvlogical decoding session'); is($stdout_recv, $expected,
'got same expected output from pg_recvlogical decoding session');
$stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1'); $stdout_recv = $node_master->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 10,
'include-xids' => '0',
'skip-empty-xacts' => '1');
chomp($stdout_recv); chomp($stdout_recv);
is($stdout_recv, '', 'pg_recvlogical acknowledged changes, nothing pending on slot'); is($stdout_recv, '',
'pg_recvlogical acknowledged changes, nothing pending on slot');
$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb'); $node_master->safe_psql('postgres', 'CREATE DATABASE otherdb');
is($node_master->psql('otherdb', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"), 3, is( $node_master->psql(
'otherdb',
"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
),
3,
'replaying logical slot from another database fails'); 'replaying logical slot from another database fails');
$node_master->safe_psql('otherdb', qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]); $node_master->safe_psql('otherdb',
qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]
);
# make sure you can't drop a slot while active # make sure you can't drop a slot while active
SKIP: SKIP:
{ {
# some Windows Perls at least don't like IPC::Run's start/kill_kill regime. # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
skip "Test fails on Windows perl", 2 if $Config{osname} eq 'MSWin32'; skip "Test fails on Windows perl", 2 if $Config{osname} eq 'MSWin32';
my $pg_recvlogical = IPC::Run::start(['pg_recvlogical', '-d', $node_master->connstr('otherdb'), '-S', 'otherdb_slot', '-f', '-', '--start']); my $pg_recvlogical = IPC::Run::start(
$node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"); [ 'pg_recvlogical', '-d', $node_master->connstr('otherdb'),
is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 3, '-S', 'otherdb_slot', '-f', '-', '--start' ]);
'dropping a DB with inactive logical slots fails'); $node_master->poll_query_until('otherdb',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
);
is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
3, 'dropping a DB with inactive logical slots fails');
$pg_recvlogical->kill_kill; $pg_recvlogical->kill_kill;
is($node_master->slot('otherdb_slot')->{'slot_name'}, undef, is($node_master->slot('otherdb_slot')->{'slot_name'},
'logical slot still exists'); undef, 'logical slot still exists');
} }
$node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"); $node_master->poll_query_until('otherdb',
is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 0, "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"
'dropping a DB with inactive logical slots succeeds'); );
is($node_master->slot('otherdb_slot')->{'slot_name'}, undef, is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
'logical slot was actually dropped with DB'); 0, 'dropping a DB with inactive logical slots succeeds');
is($node_master->slot('otherdb_slot')->{'slot_name'},
undef, 'logical slot was actually dropped with DB');
# Restarting a node with wal_level = logical that has existing # Restarting a node with wal_level = logical that has existing
# slots must succeed, but decoding from those slots must fail. # slots must succeed, but decoding from those slots must fail.
$node_master->safe_psql('postgres', 'ALTER SYSTEM SET wal_level = replica'); $node_master->safe_psql('postgres', 'ALTER SYSTEM SET wal_level = replica');
is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'logical', 'wal_level is still logical before restart'); is($node_master->safe_psql('postgres', 'SHOW wal_level'),
'logical', 'wal_level is still logical before restart');
$node_master->restart; $node_master->restart;
is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'replica', 'wal_level is replica'); is($node_master->safe_psql('postgres', 'SHOW wal_level'),
isnt($node_master->slot('test_slot')->{'catalog_xmin'}, '0', 'replica', 'wal_level is replica');
'restored slot catalog_xmin is nonzero'); isnt($node_master->slot('test_slot')->{'catalog_xmin'},
is($node_master->psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]), 3, '0', 'restored slot catalog_xmin is nonzero');
is( $node_master->psql(
'postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]),
3,
'reading from slot with wal_level < logical fails'); 'reading from slot with wal_level < logical fails');
is($node_master->psql('postgres', q[SELECT pg_drop_replication_slot('test_slot')]), 0, is( $node_master->psql(
'postgres', q[SELECT pg_drop_replication_slot('test_slot')]),
0,
'can drop logical slot while wal_level = replica'); 'can drop logical slot while wal_level = replica');
is($node_master->slot('test_slot')->{'catalog_xmin'}, '', 'slot was dropped'); is($node_master->slot('test_slot')->{'catalog_xmin'}, '', 'slot was dropped');
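A reminder of the peek/get distinction this test leans on (standard logical decoding SQL functions, not introduced by this patch): peeking returns changes without advancing the slot, so repeated peeks see the same rows, while getting consumes them. A sketch assuming a still-existing slot named 'test_slot':

# Repeated peeks return the same rows; a get confirms them and moves the slot.
my $peeked = $node_master->safe_psql('postgres',
	"SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL)");
my $consumed = $node_master->safe_psql('postgres',
	"SELECT data FROM pg_logical_slot_get_changes('test_slot', NULL, NULL)");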
@ -12,7 +12,8 @@ use Test::More tests => 1;
my $node_master = get_new_node('master'); my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1); $node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', qq{ $node_master->append_conf(
'postgresql.conf', qq{
fsync = on fsync = on
wal_log_hints = on wal_log_hints = on
max_prepared_transactions = 5 max_prepared_transactions = 5
@ -29,7 +30,8 @@ $node_standby->init_from_backup($node_master, 'master_backup',
has_streaming => 1); has_streaming => 1);
$node_standby->start; $node_standby->start;
$node_master->psql('postgres', qq{ $node_master->psql(
'postgres', qq{
create table testtab (a int, b char(100)); create table testtab (a int, b char(100));
insert into testtab select generate_series(1,1000), 'foo'; insert into testtab select generate_series(1,1000), 'foo';
insert into testtab select generate_series(1,1000), 'foo'; insert into testtab select generate_series(1,1000), 'foo';
@ -37,7 +39,8 @@ delete from testtab where ctid > '(8,0)';
}); });
# Take a lock on the table to prevent following vacuum from truncating it # Take a lock on the table to prevent following vacuum from truncating it
$node_master->psql('postgres', qq{ $node_master->psql(
'postgres', qq{
begin; begin;
lock table testtab in row share mode; lock table testtab in row share mode;
prepare transaction 'p1'; prepare transaction 'p1';
@ -51,7 +54,8 @@ $node_master->psql('postgres', 'checkpoint');
# Now do some more insert/deletes, another vacuum to ensure full-page writes # Now do some more insert/deletes, another vacuum to ensure full-page writes
# are done # are done
$node_master->psql('postgres', qq{ $node_master->psql(
'postgres', qq{
insert into testtab select generate_series(1,1000), 'foo'; insert into testtab select generate_series(1,1000), 'foo';
delete from testtab where ctid > '(8,0)'; delete from testtab where ctid > '(8,0)';
vacuum verbose testtab; vacuum verbose testtab;
@ -61,7 +65,8 @@ vacuum verbose testtab;
$node_standby->psql('postgres', 'checkpoint'); $node_standby->psql('postgres', 'checkpoint');
# Release the lock, vacuum again which should lead to truncation # Release the lock, vacuum again which should lead to truncation
$node_master->psql('postgres', qq{ $node_master->psql(
'postgres', qq{
rollback prepared 'p1'; rollback prepared 'p1';
vacuum verbose testtab; vacuum verbose testtab;
}); });
@ -78,8 +83,7 @@ $node_standby->poll_query_until('postgres', $caughtup_query)
# Promote the standby # Promote the standby
$node_standby->promote; $node_standby->promote;
$node_standby->poll_query_until('postgres', $node_standby->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$node_standby->psql('postgres', 'checkpoint'); $node_standby->psql('postgres', 'checkpoint');
@ -87,6 +91,8 @@ $node_standby->psql('postgres', 'checkpoint');
$node_standby->restart; $node_standby->restart;
# Insert should work on standby # Insert should work on standby
is($node_standby->psql('postgres', is( $node_standby->psql(
'postgres',
qq{insert into testtab select generate_series(1,1000), 'foo';}), qq{insert into testtab select generate_series(1,1000), 'foo';}),
0, 'INSERT succeeds with truncated relation FSM'); 0,
'INSERT succeeds with truncated relation FSM');
@ -9,7 +9,8 @@ use Test::More tests => 12;
# Setup master node # Setup master node
my $node_master = get_new_node("master"); my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1); $node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', qq( $node_master->append_conf(
'postgresql.conf', qq(
max_prepared_transactions = 10 max_prepared_transactions = 10
log_checkpoints = true log_checkpoints = true
)); ));
@ -19,11 +20,13 @@ $node_master->psql('postgres', "CREATE TABLE t_009_tbl (id int)");
# Setup slave node # Setup slave node
my $node_slave = get_new_node('slave'); my $node_slave = get_new_node('slave');
$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1); $node_slave->init_from_backup($node_master, 'master_backup',
has_streaming => 1);
$node_slave->start; $node_slave->start;
# Switch to synchronous replication # Switch to synchronous replication
$node_master->append_conf('postgresql.conf', qq( $node_master->append_conf(
'postgresql.conf', qq(
synchronous_standby_names = '*' synchronous_standby_names = '*'
)); ));
$node_master->psql('postgres', "SELECT pg_reload_conf()"); $node_master->psql('postgres', "SELECT pg_reload_conf()");
@ -38,7 +41,8 @@ my $psql_rc = '';
# files. # files.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
@ -64,7 +68,8 @@ is($psql_rc, '0', 'Rollback prepared transaction after restart');
# transaction using dedicated WAL records. # transaction using dedicated WAL records.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
CHECKPOINT; CHECKPOINT;
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
@ -89,7 +94,8 @@ is($psql_rc, '0', 'Rollback prepared transaction after teardown');
# Check that WAL replay can handle several transactions with same GID name. # Check that WAL replay can handle several transactions with same GID name.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
CHECKPOINT; CHECKPOINT;
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
@ -113,7 +119,8 @@ is($psql_rc, '0', 'Replay several transactions with same GID');
# while replaying transaction commits. # while replaying transaction commits.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
@ -122,7 +129,8 @@ $node_master->psql('postgres', "
COMMIT PREPARED 'xact_009_1';"); COMMIT PREPARED 'xact_009_1';");
$node_master->teardown_node; $node_master->teardown_node;
$node_master->start; $node_master->start;
$psql_rc = $node_master->psql('postgres', " $psql_rc = $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
@ -138,14 +146,17 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# Check that WAL replay will cleanup its shared memory state on running slave. # Check that WAL replay will cleanup its shared memory state on running slave.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (43); INSERT INTO t_009_tbl VALUES (43);
PREPARE TRANSACTION 'xact_009_1'; PREPARE TRANSACTION 'xact_009_1';
COMMIT PREPARED 'xact_009_1';"); COMMIT PREPARED 'xact_009_1';");
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts", $node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '0', is($psql_out, '0',
"Cleanup of shared memory state on running standby without checkpoint"); "Cleanup of shared memory state on running standby without checkpoint");
@ -155,7 +166,8 @@ is($psql_out, '0',
# prepare and commit to use on-disk twophase files. # prepare and commit to use on-disk twophase files.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
@ -163,7 +175,9 @@ $node_master->psql('postgres', "
PREPARE TRANSACTION 'xact_009_1';"); PREPARE TRANSACTION 'xact_009_1';");
$node_slave->psql('postgres', "CHECKPOINT"); $node_slave->psql('postgres', "CHECKPOINT");
$node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'"); $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts", $node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '0', is($psql_out, '0',
"Cleanup of shared memory state on running standby after checkpoint"); "Cleanup of shared memory state on running standby after checkpoint");
@ -172,7 +186,8 @@ is($psql_out, '0',
# Check that prepared transactions can be committed on promoted slave. # Check that prepared transactions can be committed on promoted slave.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
@ -180,8 +195,7 @@ $node_master->psql('postgres', "
PREPARE TRANSACTION 'xact_009_1';"); PREPARE TRANSACTION 'xact_009_1';");
$node_master->teardown_node; $node_master->teardown_node;
$node_slave->promote; $node_slave->promote;
$node_slave->poll_query_until('postgres', $node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$psql_rc = $node_slave->psql('postgres', "COMMIT PREPARED 'xact_009_1'"); $psql_rc = $node_slave->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
@ -190,7 +204,8 @@ is($psql_rc, '0', "Restore of prepared transaction on promoted slave");
# change roles # change roles
($node_master, $node_slave) = ($node_slave, $node_master); ($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master); $node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq( $node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest' recovery_target_timeline='latest'
)); ));
$node_slave->start; $node_slave->start;
@ -202,7 +217,8 @@ $node_slave->start;
# consistent. # consistent.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (42); INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1; SAVEPOINT s1;
@ -211,11 +227,12 @@ $node_master->psql('postgres', "
$node_master->stop; $node_master->stop;
$node_slave->restart; $node_slave->restart;
$node_slave->promote; $node_slave->promote;
$node_slave->poll_query_until('postgres', $node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts", $node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '1', is($psql_out, '1',
"Restore prepared transactions from files with master down"); "Restore prepared transactions from files with master down");
@ -223,7 +240,8 @@ is($psql_out, '1',
# restore state # restore state
($node_master, $node_slave) = ($node_slave, $node_master); ($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master); $node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq( $node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest' recovery_target_timeline='latest'
)); ));
$node_slave->start; $node_slave->start;
@ -234,7 +252,8 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# restart while master is down. # restart while master is down.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
INSERT INTO t_009_tbl VALUES (242); INSERT INTO t_009_tbl VALUES (242);
SAVEPOINT s1; SAVEPOINT s1;
@ -245,11 +264,12 @@ $node_master->stop;
$node_slave->teardown_node; $node_slave->teardown_node;
$node_slave->start; $node_slave->start;
$node_slave->promote; $node_slave->promote;
$node_slave->poll_query_until('postgres', $node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts", $node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '1', is($psql_out, '1',
"Restore prepared transactions from records with master down"); "Restore prepared transactions from records with master down");
@ -257,7 +277,8 @@ is($psql_out, '1',
# restore state # restore state
($node_master, $node_slave) = ($node_slave, $node_master); ($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master); $node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq( $node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest' recovery_target_timeline='latest'
)); ));
$node_slave->start; $node_slave->start;
@ -269,7 +290,8 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# XLOG_STANDBY_LOCK wal record. # XLOG_STANDBY_LOCK wal record.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
CREATE TABLE t_009_tbl2 (id int); CREATE TABLE t_009_tbl2 (id int);
SAVEPOINT s1; SAVEPOINT s1;
@ -280,6 +302,8 @@ $node_master->psql('postgres', "
CHECKPOINT; CHECKPOINT;
COMMIT PREPARED 'xact_009_1';"); COMMIT PREPARED 'xact_009_1';");
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts", $node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '0', "Replay prepared transaction with DDL"); is($psql_out, '0', "Replay prepared transaction with DDL");
@ -34,7 +34,8 @@ my ($stdout, $stderr, $ret);
# Initialize master node # Initialize master node
my $node_master = get_new_node('master'); my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, has_archiving => 1); $node_master->init(allows_streaming => 1, has_archiving => 1);
$node_master->append_conf('postgresql.conf', q[ $node_master->append_conf(
'postgresql.conf', q[
wal_level = 'logical' wal_level = 'logical'
max_replication_slots = 3 max_replication_slots = 3
max_wal_senders = 2 max_wal_senders = 2
@ -60,8 +61,7 @@ $node_master->safe_psql('postgres',
# the same physical copy trick, so: # the same physical copy trick, so:
$node_master->safe_psql('postgres', 'CREATE DATABASE dropme;'); $node_master->safe_psql('postgres', 'CREATE DATABASE dropme;');
$node_master->safe_psql('dropme', $node_master->safe_psql('dropme',
"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');" "SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');");
);
$node_master->safe_psql('postgres', 'CHECKPOINT;'); $node_master->safe_psql('postgres', 'CHECKPOINT;');
@ -76,20 +76,23 @@ $node_replica->init_from_backup(
$node_master, $backup_name, $node_master, $backup_name,
has_streaming => 1, has_streaming => 1,
has_restoring => 1); has_restoring => 1);
$node_replica->append_conf( $node_replica->append_conf('recovery.conf',
'recovery.conf', q[primary_slot_name = 'phys_slot']); q[primary_slot_name = 'phys_slot']);
$node_replica->start; $node_replica->start;
# If we drop 'dropme' on the master, the standby should drop the # If we drop 'dropme' on the master, the standby should drop the
# db and associated slot. # db and associated slot.
is($node_master->psql('postgres', 'DROP DATABASE dropme'), 0, is($node_master->psql('postgres', 'DROP DATABASE dropme'),
'dropped DB with logical slot OK on master'); 0, 'dropped DB with logical slot OK on master');
$node_master->wait_for_catchup($node_replica, 'replay', $node_master->lsn('insert')); $node_master->wait_for_catchup($node_replica, 'replay',
is($node_replica->safe_psql('postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']), '', $node_master->lsn('insert'));
is( $node_replica->safe_psql(
'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
'',
'dropped DB dropme on standby'); 'dropped DB dropme on standby');
is($node_master->slot('dropme_slot')->{'slot_name'}, undef, is($node_master->slot('dropme_slot')->{'slot_name'},
'logical slot was actually dropped on standby'); undef, 'logical slot was actually dropped on standby');
# Back to testing failover... # Back to testing failover...
$node_master->safe_psql('postgres', $node_master->safe_psql('postgres',
@ -109,18 +112,21 @@ is($stdout, 'before_basebackup',
# from the master to make sure its hot_standby_feedback # from the master to make sure its hot_standby_feedback
# has locked in a catalog_xmin on the physical slot, and that # has locked in a catalog_xmin on the physical slot, and that
# any xmin is < the catalog_xmin # any xmin is < the catalog_xmin
$node_master->poll_query_until('postgres', q[ $node_master->poll_query_until(
'postgres', q[
SELECT catalog_xmin IS NOT NULL SELECT catalog_xmin IS NOT NULL
FROM pg_replication_slots FROM pg_replication_slots
WHERE slot_name = 'phys_slot' WHERE slot_name = 'phys_slot'
]); ]);
my $phys_slot = $node_master->slot('phys_slot'); my $phys_slot = $node_master->slot('phys_slot');
isnt($phys_slot->{'xmin'}, '', isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
'xmin assigned on physical slot of master'); isnt($phys_slot->{'catalog_xmin'},
isnt($phys_slot->{'catalog_xmin'}, '', '', 'catalog_xmin assigned on physical slot of master');
'catalog_xmin assigned on physical slot of master');
# Ignore wrap-around here, we're on a new cluster: # Ignore wrap-around here, we're on a new cluster:
cmp_ok($phys_slot->{'xmin'}, '>=', $phys_slot->{'catalog_xmin'}, cmp_ok(
$phys_slot->{'xmin'}, '>=',
$phys_slot->{'catalog_xmin'},
'xmin on physical slot must not be lower than catalog_xmin'); 'xmin on physical slot must not be lower than catalog_xmin');
$node_master->safe_psql('postgres', 'CHECKPOINT'); $node_master->safe_psql('postgres', 'CHECKPOINT');
@ -162,23 +168,30 @@ COMMIT
BEGIN BEGIN
table public.decoding: INSERT: blah[text]:'after failover' table public.decoding: INSERT: blah[text]:'after failover'
COMMIT); COMMIT);
is($stdout, $final_expected_output_bb, 'decoded expected data from slot before_basebackup'); is($stdout, $final_expected_output_bb,
'decoded expected data from slot before_basebackup');
is($stderr, '', 'replay from slot before_basebackup produces no stderr'); is($stderr, '', 'replay from slot before_basebackup produces no stderr');
# So far we've peeked the slots, so when we fetch the same info over # So far we've peeked the slots, so when we fetch the same info over
# pg_recvlogical we should get complete results. First, find out the commit lsn # pg_recvlogical we should get complete results. First, find out the commit lsn
# of the last transaction. There's no max(pg_lsn), so: # of the last transaction. There's no max(pg_lsn), so:
my $endpos = $node_replica->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"); my $endpos = $node_replica->safe_psql('postgres',
"SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);
# now use the walsender protocol to peek the slot changes and make sure we see # now use the walsender protocol to peek the slot changes and make sure we see
# the same results. # the same results.
$stdout = $node_replica->pg_recvlogical_upto('postgres', 'before_basebackup', $stdout = $node_replica->pg_recvlogical_upto(
$endpos, 30, 'include-xids' => '0', 'skip-empty-xacts' => '1'); 'postgres', 'before_basebackup',
$endpos, 30,
'include-xids' => '0',
'skip-empty-xacts' => '1');
# walsender likes to add a newline # walsender likes to add a newline
chomp($stdout); chomp($stdout);
is($stdout, $final_expected_output_bb, 'got same output from walsender via pg_recvlogical on before_basebackup'); is($stdout, $final_expected_output_bb,
'got same output from walsender via pg_recvlogical on before_basebackup');
$node_replica->teardown_node(); $node_replica->teardown_node();
@ -9,6 +9,7 @@ use Test::More;
use Config; use Config;
if ($Config{osname} eq 'MSWin32') if ($Config{osname} eq 'MSWin32')
{ {
# some Windows Perls at least don't like IPC::Run's start/kill_kill regime. # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
plan skip_all => "Test fails on Windows perl"; plan skip_all => "Test fails on Windows perl";
} }
@ -28,8 +29,14 @@ my ($stdin, $stdout, $stderr) = ('', '', '');
# an xact to be in-progress when we crash and we need to know # an xact to be in-progress when we crash and we need to know
# its xid. # its xid.
my $tx = IPC::Run::start( my $tx = IPC::Run::start(
['psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', $node->connstr('postgres')], [ 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
'<', \$stdin, '>', \$stdout, '2>', \$stderr); $node->connstr('postgres') ],
'<',
\$stdin,
'>',
\$stdout,
'2>',
\$stderr);
$stdin .= q[ $stdin .= q[
BEGIN; BEGIN;
CREATE TABLE mine(x integer); CREATE TABLE mine(x integer);
@ -41,16 +48,19 @@ $tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/;
my $xid = $stdout; my $xid = $stdout;
chomp($xid); chomp($xid);
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'in progress', 'own xid is in-progres'); is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
'in progress', 'own xid is in-progres');
# Crash and restart the postmaster # Crash and restart the postmaster
$node->stop('immediate'); $node->stop('immediate');
$node->start; $node->start;
# Make sure we really got a new xid # Make sure we really got a new xid
cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'), '>', $xid, cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'),
'new xid after restart is greater'); '>', $xid, 'new xid after restart is greater');
# and make sure we show the in-progress xact as aborted # and make sure we show the in-progress xact as aborted
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'aborted', 'xid is aborted after crash'); is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
'aborted', 'xid is aborted after crash');
$tx->kill_kill; $tx->kill_kill;
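For reference, txid_status() is new in PostgreSQL 10 and reports 'committed', 'aborted' or 'in progress', returning NULL once the xid falls outside the retained history. Outside the crash scenario, a minimal sanity check, assuming the same $node and Test::More, might be:

my $xid2 = $node->safe_psql('postgres', 'SELECT txid_current()');
is($node->safe_psql('postgres', "SELECT txid_status('$xid2')"),
	'committed', 'completed transaction reports committed');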
@ -9,7 +9,8 @@ use Test::More tests => 12;
# Setup master node # Setup master node
my $node_master = get_new_node("master"); my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1); $node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', qq( $node_master->append_conf(
'postgresql.conf', qq(
max_prepared_transactions = 10 max_prepared_transactions = 10
log_checkpoints = true log_checkpoints = true
)); ));
@ -19,11 +20,13 @@ $node_master->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
# Setup slave node # Setup slave node
my $node_slave = get_new_node('slave'); my $node_slave = get_new_node('slave');
$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1); $node_slave->init_from_backup($node_master, 'master_backup',
has_streaming => 1);
$node_slave->start; $node_slave->start;
# Switch to synchronous replication # Switch to synchronous replication
$node_master->append_conf('postgresql.conf', qq( $node_master->append_conf(
'postgresql.conf', qq(
synchronous_standby_names = '*' synchronous_standby_names = '*'
)); ));
$node_master->psql('postgres', "SELECT pg_reload_conf()"); $node_master->psql('postgres', "SELECT pg_reload_conf()");
@ -36,7 +39,8 @@ my $psql_rc = '';
# so that it won't conflict with savepoint xids. # so that it won't conflict with savepoint xids.
############################################################################### ###############################################################################
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
DELETE FROM t_012_tbl; DELETE FROM t_012_tbl;
INSERT INTO t_012_tbl VALUES (43); INSERT INTO t_012_tbl VALUES (43);
@ -55,7 +59,8 @@ $node_master->psql('postgres', "
$node_master->stop; $node_master->stop;
$node_master->start; $node_master->start;
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
-- here we can get xid of previous savepoint if nextXid -- here we can get xid of previous savepoint if nextXid
-- wasn't properly advanced -- wasn't properly advanced
BEGIN; BEGIN;
@ -63,7 +68,9 @@ $node_master->psql('postgres', "
ROLLBACK; ROLLBACK;
COMMIT PREPARED 'xact_012_1';"); COMMIT PREPARED 'xact_012_1';");
$node_master->psql('postgres', "SELECT count(*) FROM t_012_tbl", $node_master->psql(
'postgres',
"SELECT count(*) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '6', "Check nextXid handling for prepared subtransactions"); is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
@ -75,7 +82,8 @@ is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
$node_master->psql('postgres', "DELETE FROM t_012_tbl"); $node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql # Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer) CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void RETURNS void
LANGUAGE plpgsql LANGUAGE plpgsql
@ -87,39 +95,48 @@ $node_master->psql('postgres', "
RETURN; RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END; EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;"); \$\$;");
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
SELECT hs_subxids(127); SELECT hs_subxids(127);
COMMIT;"); COMMIT;");
$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert')); $node_master->wait_for_catchup($node_slave, 'replay',
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_master->lsn('insert'));
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '8128', "Visible"); is($psql_out, '8128', "Visible");
$node_master->stop; $node_master->stop;
$node_slave->promote; $node_slave->promote;
$node_slave->poll_query_until('postgres', $node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '8128', "Visible"); is($psql_out, '8128', "Visible");
# restore state # restore state
($node_master, $node_slave) = ($node_slave, $node_master); ($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master); $node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq( $node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest' recovery_target_timeline='latest'
)); ));
$node_slave->start; $node_slave->start;
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '8128', "Visible"); is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl"); $node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql # Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer) CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void RETURNS void
LANGUAGE plpgsql LANGUAGE plpgsql
@ -131,67 +148,87 @@ $node_master->psql('postgres', "
RETURN; RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END; EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;"); \$\$;");
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
SELECT hs_subxids(127); SELECT hs_subxids(127);
PREPARE TRANSACTION 'xact_012_1';"); PREPARE TRANSACTION 'xact_012_1';");
$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert')); $node_master->wait_for_catchup($node_slave, 'replay',
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_master->lsn('insert'));
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '-1', "Not visible"); is($psql_out, '-1', "Not visible");
$node_master->stop; $node_master->stop;
$node_slave->promote; $node_slave->promote;
$node_slave->poll_query_until('postgres', $node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '-1', "Not visible"); is($psql_out, '-1', "Not visible");
# restore state # restore state
($node_master, $node_slave) = ($node_slave, $node_master); ($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master); $node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq( $node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest' recovery_target_timeline='latest'
)); ));
$node_slave->start; $node_slave->start;
$psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'"); $psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
is($psql_rc, '0', "Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"); is($psql_rc, '0',
"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
);
$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_master->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '8128', "Visible"); is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl"); $node_master->psql('postgres', "DELETE FROM t_012_tbl");
$node_master->psql('postgres', " $node_master->psql(
'postgres', "
BEGIN; BEGIN;
SELECT hs_subxids(201); SELECT hs_subxids(201);
PREPARE TRANSACTION 'xact_012_1';"); PREPARE TRANSACTION 'xact_012_1';");
$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert')); $node_master->wait_for_catchup($node_slave, 'replay',
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_master->lsn('insert'));
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '-1', "Not visible"); is($psql_out, '-1', "Not visible");
$node_master->stop; $node_master->stop;
$node_slave->promote; $node_slave->promote;
$node_slave->poll_query_until('postgres', $node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
"SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby"; or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '-1', "Not visible"); is($psql_out, '-1', "Not visible");
# restore state # restore state
($node_master, $node_slave) = ($node_slave, $node_master); ($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master); $node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq( $node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest' recovery_target_timeline='latest'
)); ));
$node_slave->start; $node_slave->start;
$psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'"); $psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
is($psql_rc, '0', "Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"); is($psql_rc, '0',
"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
);
$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl", $node_master->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out); stdout => \$psql_out);
is($psql_out, '-1', "Not visible"); is($psql_out, '-1', "Not visible");
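
The hunks above keep repeating one idiom from the TAP framework in src/test/perl: wait for replay with wait_for_catchup(), stop the old primary, promote the standby, poll until pg_is_in_recovery() turns false, and then read a result back through psql()'s stdout reference. The following is a minimal sketch of that sequence, assuming only the PostgresNode and TestLib modules; the node, backup, and table names are invented for the example and are not part of the patched tests.

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 1;

# Primary node plus a streaming standby built from a base backup.
my $node_master = get_new_node('example_master');
$node_master->init(allows_streaming => 1);
$node_master->start;
$node_master->backup('example_backup');

my $node_slave = get_new_node('example_standby');
$node_slave->init_from_backup($node_master, 'example_backup',
	has_streaming => 1);
$node_slave->start;

$node_master->safe_psql('postgres', "CREATE TABLE tab_demo (id int)");
$node_master->safe_psql('postgres', "INSERT INTO tab_demo VALUES (1)");

# Make sure the insert has been replayed on the standby before the
# primary goes away, as the tests above do with wait_for_catchup().
$node_master->wait_for_catchup($node_slave, 'replay',
	$node_master->lsn('insert'));

# Promote the standby and wait until it has left recovery.
$node_master->stop;
$node_slave->promote;
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
  or die "Timed out while waiting for promotion of standby";

# psql() with a stdout reference captures the query result for an is() test.
my $psql_out;
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM tab_demo",
	stdout => \$psql_out);
is($psql_out, '1', 'row visible after promotion');

$node_slave->stop;

poll_query_until() simply re-runs the query until it returns true, which is why the promotion checks above never need an explicit sleep.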

View File

@ -98,7 +98,8 @@ sub switch_server_cert
my $cafile = $_[2] || "root+client_ca"; my $cafile = $_[2] || "root+client_ca";
my $pgdata = $node->data_dir; my $pgdata = $node->data_dir;
note "reloading server with certfile \"$certfile\" and cafile \"$cafile\""; note
"reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
open my $sslconf, '>', "$pgdata/sslconfig.conf"; open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl=on\n"; print $sslconf "ssl=on\n";

View File

@ -26,19 +26,15 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)"); "CREATE TABLE tab_rep (a int primary key)");
# Setup structure on subscriber # Setup structure on subscriber
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
"CREATE TABLE tab_notrep (a int)"); $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_ins (a int)");
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full (a int)");
"CREATE TABLE tab_ins (a int)");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_full (a int)");
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)"); "CREATE TABLE tab_rep (a int primary key)");
# Setup logical replication # Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub");
"CREATE PUBLICATION tap_pub");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)"); "CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
@ -48,7 +44,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub'; my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only"); "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only"
);
# Wait for subscriber to finish initialization # Wait for subscriber to finish initialization
my $caughtup_query = my $caughtup_query =
@ -72,27 +69,23 @@ is($result, qq(1002), 'check initial data was copied to subscriber');
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1,50)"); "INSERT INTO tab_ins SELECT generate_series(1,50)");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 20");
"DELETE FROM tab_ins WHERE a > 20"); $node_publisher->safe_psql('postgres', "UPDATE tab_ins SET a = -a");
$node_publisher->safe_psql('postgres',
"UPDATE tab_ins SET a = -a");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"INSERT INTO tab_rep SELECT generate_series(1,50)"); "INSERT INTO tab_rep SELECT generate_series(1,50)");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "DELETE FROM tab_rep WHERE a > 20");
"DELETE FROM tab_rep WHERE a > 20"); $node_publisher->safe_psql('postgres', "UPDATE tab_rep SET a = -a");
$node_publisher->safe_psql('postgres',
"UPDATE tab_rep SET a = -a");
$node_publisher->poll_query_until('postgres', $caughtup_query) $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins"); "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002), 'check replicated inserts on subscriber'); is($result, qq(1052|1|1002), 'check replicated inserts on subscriber');
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep"); "SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1), 'check replicated changes on subscriber'); is($result, qq(20|-20|-1), 'check replicated changes on subscriber');
# insert some duplicate rows # insert some duplicate rows
@ -110,107 +103,114 @@ $node_subscriber->safe_psql('postgres',
"ALTER TABLE tab_ins REPLICA IDENTITY FULL"); "ALTER TABLE tab_ins REPLICA IDENTITY FULL");
# and do the update # and do the update
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "UPDATE tab_full SET a = a * a");
"UPDATE tab_full SET a = a * a");
# Wait for subscription to catch up # Wait for subscription to catch up
$node_publisher->poll_query_until('postgres', $caughtup_query) $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full"); "SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(20|1|100), 'update works with REPLICA IDENTITY FULL and duplicate tuples'); is($result, qq(20|1|100),
'update works with REPLICA IDENTITY FULL and duplicate tuples');
# check that change of connection string and/or publication list causes # check that change of connection string and/or publication list causes
# restart of subscription workers. Not all of these are registered as tests # restart of subscription workers. Not all of these are registered as tests
# as we need to poll for a change but the test suite will fail none the less # as we need to poll for a change but the test suite will fail none the less
# when something goes wrong. # when something goes wrong.
my $oldpid = $node_publisher->safe_psql('postgres', my $oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"); "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'"); "ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'"
);
$node_publisher->poll_query_until('postgres', $node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';") "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
or die "Timed out while waiting for apply to restart"; ) or die "Timed out while waiting for apply to restart";
$oldpid = $node_publisher->safe_psql('postgres', $oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"); "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)"); "ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)"
);
$node_publisher->poll_query_until('postgres', $node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';") "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
or die "Timed out while waiting for apply to restart"; ) or die "Timed out while waiting for apply to restart";
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1001,1100)"); "INSERT INTO tab_ins SELECT generate_series(1001,1100)");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "DELETE FROM tab_rep");
"DELETE FROM tab_rep");
$node_publisher->poll_query_until('postgres', $caughtup_query) $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins"); "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1152|1|1100), 'check replicated inserts after subscription publication change'); is($result, qq(1152|1|1100),
'check replicated inserts after subscription publication change');
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep"); "SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1), 'check changes skipped after subscription publication change'); is($result, qq(20|-20|-1),
'check changes skipped after subscription publication change');
# check alter publication (relcache invalidation etc) # check alter publication (relcache invalidation etc)
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only SET (publish = 'insert, delete')"); "ALTER PUBLICATION tap_pub_ins_only SET (publish = 'insert, delete')");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_full"); "ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_full");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 0");
"DELETE FROM tab_ins WHERE a > 0");
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)"); "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)"
$node_publisher->safe_psql('postgres', );
"INSERT INTO tab_full VALUES(0)"); $node_publisher->safe_psql('postgres', "INSERT INTO tab_full VALUES(0)");
$node_publisher->poll_query_until('postgres', $caughtup_query) $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# note that data are different on provider and subscriber # note that data are different on provider and subscriber
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins"); "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002), 'check replicated deletes after alter publication'); is($result, qq(1052|1|1002),
'check replicated deletes after alter publication');
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full"); "SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(21|0|100), 'check replicated insert after alter publication'); is($result, qq(21|0|100), 'check replicated insert after alter publication');
# check restart on rename # check restart on rename
$oldpid = $node_publisher->safe_psql('postgres', $oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"); "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed"); "ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
$node_publisher->poll_query_until('postgres', $node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';") "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
or die "Timed out while waiting for apply to restart"; ) or die "Timed out while waiting for apply to restart";
# check all the cleanup # check all the cleanup
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed"); $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription"); "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber'); is($result, qq(0), 'check subscription was dropped on subscriber');
$result = $result = $node_publisher->safe_psql('postgres',
$node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots"); "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher'); is($result, qq(0), 'check replication slot was dropped on publisher');
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel"); "SELECT count(*) FROM pg_subscription_rel");
is($result, qq(0), 'check subscription relation status was dropped on subscriber'); is($result, qq(0),
'check subscription relation status was dropped on subscriber');
$result = $result = $node_publisher->safe_psql('postgres',
$node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots"); "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher'); is($result, qq(0), 'check replication slot was dropped on publisher');
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin"); "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber'); is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast'); $node_subscriber->stop('fast');
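
All of the subscription tests above follow the same outline: identical tables on both nodes, a publication on the publisher, a subscription on the subscriber, then a poll of pg_stat_replication on the publisher until replay_lsn has passed the current WAL position before the subscriber is queried with safe_psql(). Below is a condensed sketch of that outline, again assuming the PostgresNode TAP framework; the publication, subscription, and table names are illustrative, and copy_data = false is used so the example does not have to wait for an initial table sync.

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 1;

my $node_publisher = get_new_node('example_publisher');
$node_publisher->init(allows_streaming => 'logical');
$node_publisher->start;

my $node_subscriber = get_new_node('example_subscriber');
$node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;

# Identical table definitions on both sides.
$node_publisher->safe_psql('postgres', "CREATE TABLE tab_demo (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_demo (a int)");

my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my $appname           = 'example_sub';
$node_publisher->safe_psql('postgres',
	"CREATE PUBLICATION example_pub FOR TABLE tab_demo");
$node_subscriber->safe_psql('postgres',
	"CREATE SUBSCRIPTION example_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION example_pub WITH (copy_data = false)"
);

$node_publisher->safe_psql('postgres',
	"INSERT INTO tab_demo SELECT generate_series(1,10)");

# Same catch-up poll as in the tests above: wait until the apply worker's
# walsender reports replay past the current WAL position.
$node_publisher->poll_query_until('postgres',
	"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"
) or die "Timed out while waiting for subscriber to catch up";

my $result = $node_subscriber->safe_psql('postgres',
	"SELECT count(*), min(a), max(a) FROM tab_demo");
is($result, qq(10|1|10), 'rows replicated to subscriber');

$node_subscriber->stop('fast');
$node_publisher->stop('fast');

When initial table synchronization is involved, the real tests additionally poll pg_subscription_rel, as the later hunks show.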

View File

@ -103,7 +103,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub'; my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)"); "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)"
);
# Wait for subscriber to finish initialization # Wait for subscriber to finish initialization
my $caughtup_query = my $caughtup_query =
@ -118,7 +119,8 @@ $node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data"; or die "Timed out while waiting for subscriber to synchronize data";
# Insert initial test data # Insert initial test data
$node_publisher->safe_psql('postgres', qq( $node_publisher->safe_psql(
'postgres', qq(
-- test_tbl_one_array_col -- test_tbl_one_array_col
INSERT INTO tst_one_array (a, b) VALUES INSERT INTO tst_one_array (a, b) VALUES
(1, '{1, 2, 3}'), (1, '{1, 2, 3}'),
@ -248,7 +250,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber # Check the data on subscriber
my $result = $node_subscriber->safe_psql('postgres', qq( my $result = $node_subscriber->safe_psql(
'postgres', qq(
SET timezone = '+2'; SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a; SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a; SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@ -334,7 +337,8 @@ e|{d,NULL}
'check replicated inserts on subscriber'); 'check replicated inserts on subscriber');
# Run batch of updates # Run batch of updates
$node_publisher->safe_psql('postgres', qq( $node_publisher->safe_psql(
'postgres', qq(
UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1; UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1;
UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3; UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3;
UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}'; UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}';
@ -368,7 +372,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber # Check the data on subscriber
$result = $node_subscriber->safe_psql('postgres', qq( $result = $node_subscriber->safe_psql(
'postgres', qq(
SET timezone = '+2'; SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a; SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a; SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@ -454,7 +459,8 @@ e|{e,d}
'check replicated updates on subscriber'); 'check replicated updates on subscriber');
# Run batch of deletes # Run batch of deletes
$node_publisher->safe_psql('postgres', qq( $node_publisher->safe_psql(
'postgres', qq(
DELETE FROM tst_one_array WHERE a = 1; DELETE FROM tst_one_array WHERE a = 1;
DELETE FROM tst_one_array WHERE b = '{2, 3, 1}'; DELETE FROM tst_one_array WHERE b = '{2, 3, 1}';
DELETE FROM tst_arrays WHERE a = '{1, 2, 3}'; DELETE FROM tst_arrays WHERE a = '{1, 2, 3}';
@ -487,7 +493,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber # Check the data on subscriber
$result = $node_subscriber->safe_psql('postgres', qq( $result = $node_subscriber->safe_psql(
'postgres', qq(
SET timezone = '+2'; SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a; SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a; SELECT a, b, c, d FROM tst_arrays ORDER BY a;

View File

@ -19,13 +19,15 @@ $node_subscriber->start;
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);"); "CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"); "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
);
# Setup structure on subscriber # Setup structure on subscriber
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);"); "CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"); "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
);
# Setup logical replication # Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@ -34,7 +36,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub'; my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"); "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
);
# Wait for subscriber to finish initialization # Wait for subscriber to finish initialization
my $caughtup_query = my $caughtup_query =
@ -51,17 +54,16 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# Check data on subscriber # Check data on subscriber
my $result = my $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk;"); "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
is($result, qq(1|1|1), 'check replicated tab_fk inserts on subscriber'); is($result, qq(1|1|1), 'check replicated tab_fk inserts on subscriber');
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;"); "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(1|1|1), 'check replicated tab_fk_ref inserts on subscriber'); is($result, qq(1|1|1), 'check replicated tab_fk_ref inserts on subscriber');
# Drop the fk on publisher # Drop the fk on publisher
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "DROP TABLE tab_fk CASCADE;");
"DROP TABLE tab_fk CASCADE;");
# Insert data # Insert data
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
@ -71,12 +73,13 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# FK is not enforced on subscriber # FK is not enforced on subscriber
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;"); "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check FK ignored on subscriber'); is($result, qq(2|1|2), 'check FK ignored on subscriber');
# Add replica trigger # Add replica trigger
$node_subscriber->safe_psql('postgres', qq{ $node_subscriber->safe_psql(
'postgres', qq{
CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS \$\$ CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS \$\$
BEGIN BEGIN
IF (TG_OP = 'INSERT') THEN IF (TG_OP = 'INSERT') THEN
@ -105,8 +108,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
# The row should be skipped on subscriber # The row should be skipped on subscriber
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;"); "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check replica trigger applied on subscriber'); is($result, qq(2|1|2), 'check replica trigger applied on subscriber');
$node_subscriber->stop('fast'); $node_subscriber->stop('fast');
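
The trigger hunks above depend on the fact that the logical replication apply worker runs with session_replication_role set to replica, so only triggers marked ENABLE REPLICA (or ENABLE ALWAYS) fire while changes are being applied. The same behaviour can be shown on a single node by setting the role by hand; the sketch below assumes only the PostgresNode TAP framework, and the table, function, and trigger names are made up for the example.

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 2;

my $node = get_new_node('example_trigger');
$node->init;
$node->start;

$node->safe_psql(
	'postgres', qq{
CREATE TABLE tab_demo (id int);
CREATE FUNCTION block_all_fn() RETURNS TRIGGER AS \$\$
BEGIN
	RETURN NULL;  -- returning NULL from a BEFORE trigger skips the row
END;
\$\$ LANGUAGE plpgsql;
CREATE TRIGGER block_all BEFORE INSERT ON tab_demo
	FOR EACH ROW EXECUTE PROCEDURE block_all_fn();
});

# Under session_replication_role = replica (what the apply worker uses),
# an ordinary trigger does not fire, so the row goes in.
$node->safe_psql('postgres',
	"SET session_replication_role = replica; INSERT INTO tab_demo VALUES (1);");
is($node->safe_psql('postgres', "SELECT count(*) FROM tab_demo"),
	'1', 'ordinary trigger is skipped under replica role');

# A trigger marked ENABLE REPLICA does fire during apply and can filter rows.
$node->safe_psql('postgres',
	"ALTER TABLE tab_demo ENABLE REPLICA TRIGGER block_all");
$node->safe_psql('postgres',
	"SET session_replication_role = replica; INSERT INTO tab_demo VALUES (2);");
is($node->safe_psql('postgres', "SELECT count(*) FROM tab_demo"),
	'1', 'replica trigger fired and skipped the row');

$node->stop;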

View File

@ -13,7 +13,8 @@ $node_publisher->start;
# Create subscriber node # Create subscriber node
my $node_subscriber = get_new_node('subscriber'); my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical'); $node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->append_conf('postgresql.conf', "wal_retrieve_retry_interval = 1ms"); $node_subscriber->append_conf('postgresql.conf',
"wal_retrieve_retry_interval = 1ms");
$node_subscriber->start; $node_subscriber->start;
# Create some preexisting content on publisher # Create some preexisting content on publisher
@ -33,7 +34,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub'; my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"); "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
);
# Wait for subscriber to finish initialization # Wait for subscriber to finish initialization
my $caughtup_query = my $caughtup_query =
@ -59,17 +61,16 @@ $node_publisher->safe_psql('postgres',
# recreate the subscription, it will try to do initial copy # recreate the subscription, it will try to do initial copy
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"); "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
);
# but it will be stuck on data copy as it will fail on constraint # but it will be stuck on data copy as it will fail on constraint
my $started_query = my $started_query = "SELECT srsubstate = 'd' FROM pg_subscription_rel;";
"SELECT srsubstate = 'd' FROM pg_subscription_rel;";
$node_subscriber->poll_query_until('postgres', $started_query) $node_subscriber->poll_query_until('postgres', $started_query)
or die "Timed out while waiting for subscriber to start sync"; or die "Timed out while waiting for subscriber to start sync";
# remove the conflicting data # remove the conflicting data
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
"DELETE FROM tab_rep;");
# wait for sync to finish this time # wait for sync to finish this time
$node_subscriber->poll_query_until('postgres', $synced_query) $node_subscriber->poll_query_until('postgres', $synced_query)
@ -82,28 +83,30 @@ is($result, qq(20), 'initial data synced for second sub');
# now check another subscription for the same node pair # now check another subscription for the same node pair
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"); "CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
);
# wait for it to start # wait for it to start
$node_subscriber->poll_query_until('postgres', "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL") $node_subscriber->poll_query_until('postgres',
or die "Timed out while waiting for subscriber to start"; "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL"
) or die "Timed out while waiting for subscriber to start";
# and drop both subscriptions # and drop both subscriptions
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub"); $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub2"); $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub2");
# check subscriptions are removed # check subscriptions are removed
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription"); "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'second and third sub are dropped'); is($result, qq(0), 'second and third sub are dropped');
# remove the conflicting data # remove the conflicting data
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
"DELETE FROM tab_rep;");
# recreate the subscription again # recreate the subscription again
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"); "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
);
# and wait for data sync to finish again # and wait for data sync to finish again
$node_subscriber->poll_query_until('postgres', $synced_query) $node_subscriber->poll_query_until('postgres', $synced_query)
@ -115,8 +118,7 @@ $result =
is($result, qq(20), 'initial data synced for fourth sub'); is($result, qq(20), 'initial data synced for fourth sub');
# add new table on subscriber # add new table on subscriber
$node_subscriber->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep_next (a int)");
"CREATE TABLE tab_rep_next (a int)");
# setup structure with existing data on publisher # setup structure with existing data on publisher
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
@ -126,8 +128,8 @@ $node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query) $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next"); "SELECT count(*) FROM tab_rep_next");
is($result, qq(0), 'no data for table added after subscription initialized'); is($result, qq(0), 'no data for table added after subscription initialized');
# ask for data sync # ask for data sync
@ -138,9 +140,10 @@ $node_subscriber->safe_psql('postgres',
$node_subscriber->poll_query_until('postgres', $synced_query) $node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data"; or die "Timed out while waiting for subscriber to synchronize data";
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next"); "SELECT count(*) FROM tab_rep_next");
is($result, qq(10), 'data for table added after subscription initialized are now synced'); is($result, qq(10),
'data for table added after subscription initialized are now synced');
# Add some data # Add some data
$node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres',
@ -150,9 +153,10 @@ $node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query) $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up"; or die "Timed out while waiting for subscriber to catch up";
$result = $result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next"); "SELECT count(*) FROM tab_rep_next");
is($result, qq(20), 'changes for table added after subscription initialized replicated'); is($result, qq(20),
'changes for table added after subscription initialized replicated');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub"); $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");

View File

@ -10,16 +10,20 @@ sub wait_for_caught_up
my ($node, $appname) = @_; my ($node, $appname) = @_;
$node->poll_query_until('postgres', $node->poll_query_until('postgres',
"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';") "SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"
or die "Timed out while waiting for subscriber to catch up"; ) or die "Timed out while waiting for subscriber to catch up";
} }
my $node_publisher = get_new_node('publisher'); my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=UTF8']); $node_publisher->init(
allows_streaming => 'logical',
extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_publisher->start; $node_publisher->start;
my $node_subscriber = get_new_node('subscriber'); my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=LATIN1']); $node_subscriber->init(
allows_streaming => 'logical',
extra => [ '--locale=C', '--encoding=LATIN1' ]);
$node_subscriber->start; $node_subscriber->start;
my $ddl = "CREATE TABLE test1 (a int, b text);"; my $ddl = "CREATE TABLE test1 (a int, b text);";
@ -29,16 +33,22 @@ $node_subscriber->safe_psql('postgres', $ddl);
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my $appname = 'encoding_test'; my $appname = 'encoding_test';
$node_publisher->safe_psql('postgres', "CREATE PUBLICATION mypub FOR ALL TABLES;"); $node_publisher->safe_psql('postgres',
$node_subscriber->safe_psql('postgres', "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;"); "CREATE PUBLICATION mypub FOR ALL TABLES;");
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;"
);
wait_for_caught_up($node_publisher, $appname); wait_for_caught_up($node_publisher, $appname);
$node_publisher->safe_psql('postgres', q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8 $node_publisher->safe_psql('postgres',
q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
wait_for_caught_up($node_publisher, $appname); wait_for_caught_up($node_publisher, $appname);
is($node_subscriber->safe_psql('postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}), # LATIN1 is( $node_subscriber->safe_psql(
'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}
), # LATIN1
qq(1), qq(1),
'data replicated to subscriber'); 'data replicated to subscriber');

View File

@ -36,7 +36,8 @@ exit 0 if $ccode !~ m/^#define YY_FLEX_SUBMINOR_VERSION (\d+)$/m;
exit 0 if $1 >= 36; exit 0 if $1 >= 36;
# Apply the desired patch. # Apply the desired patch.
$ccode =~ s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/ $ccode =~
s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/
.*?) .*?)
return yy_is_jam \? 0 : yy_current_state; return yy_is_jam \? 0 : yy_current_state;
|$1 |$1

View File

@ -20,12 +20,12 @@ our (@ISA, @EXPORT_OK);
my $insttype; my $insttype;
my @client_contribs = ('oid2name', 'pgbench', 'vacuumlo'); my @client_contribs = ('oid2name', 'pgbench', 'vacuumlo');
my @client_program_files = ( my @client_program_files = (
'clusterdb', 'createdb', 'createuser', 'clusterdb', 'createdb', 'createuser', 'dropdb',
'dropdb', 'dropuser', 'ecpg', 'dropuser', 'ecpg', 'libecpg', 'libecpg_compat',
'libecpg', 'libecpg_compat', 'libpgtypes', 'libpq', 'libpgtypes', 'libpq', 'pg_basebackup', 'pg_config',
'pg_basebackup', 'pg_config', 'pg_dump', 'pg_dumpall', 'pg_dump', 'pg_dumpall', 'pg_isready', 'pg_receivewal',
'pg_isready', 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'pg_recvlogical', 'pg_restore', 'psql', 'reindexdb',
'psql', 'reindexdb', 'vacuumdb', @client_contribs); 'vacuumdb', @client_contribs);
sub lcopy sub lcopy
{ {
@ -392,8 +392,8 @@ sub GenerateTimezoneFiles
print "Generating timezone files..."; print "Generating timezone files...";
my @args = ("$conf/zic/zic", '-d', "$target/share/timezone", my @args =
'-p', "$posixrules"); ("$conf/zic/zic", '-d', "$target/share/timezone", '-p', "$posixrules");
foreach (@tzfiles) foreach (@tzfiles)
{ {
my $tzfile = $_; my $tzfile = $_;

View File

@ -35,8 +35,7 @@ my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo'); my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo');
my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo'); my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo');
my $contrib_extralibs = undef; my $contrib_extralibs = undef;
my $contrib_extraincludes = my $contrib_extraincludes = { 'dblink' => ['src/backend'] };
{ 'dblink' => ['src/backend'] };
my $contrib_extrasource = { my $contrib_extrasource = {
'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], }; 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], };
@ -256,8 +255,7 @@ sub mkvcbuild
$libpqwalreceiver->AddIncludeDir('src/interfaces/libpq'); $libpqwalreceiver->AddIncludeDir('src/interfaces/libpq');
$libpqwalreceiver->AddReference($postgres, $libpq); $libpqwalreceiver->AddReference($postgres, $libpq);
my $pgoutput = $solution->AddProject( my $pgoutput = $solution->AddProject('pgoutput', 'dll', '',
'pgoutput', 'dll', '',
'src/backend/replication/pgoutput'); 'src/backend/replication/pgoutput');
$pgoutput->AddReference($postgres); $pgoutput->AddReference($postgres);
@ -504,12 +502,14 @@ sub mkvcbuild
'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython', 'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython',
'plpython' . $pymajorver, 'src/pl/plpython', 'plpython' . $pymajorver, 'src/pl/plpython',
'hstore', 'contrib/hstore'); 'hstore', 'contrib/hstore');
$hstore_plpython->AddDefine('PLPYTHON_LIBNAME="plpython' . $pymajorver . '"'); $hstore_plpython->AddDefine(
'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
my $ltree_plpython = AddTransformModule( my $ltree_plpython = AddTransformModule(
'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython', 'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython',
'plpython' . $pymajorver, 'src/pl/plpython', 'plpython' . $pymajorver, 'src/pl/plpython',
'ltree', 'contrib/ltree'); 'ltree', 'contrib/ltree');
$ltree_plpython->AddDefine('PLPYTHON_LIBNAME="plpython' . $pymajorver . '"'); $ltree_plpython->AddDefine(
'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
} }
if ($solution->{options}->{perl}) if ($solution->{options}->{perl})
@ -586,15 +586,15 @@ sub mkvcbuild
$plperl->AddReference($postgres); $plperl->AddReference($postgres);
my $perl_path = $solution->{options}->{perl} . '\lib\CORE\perl*.lib'; my $perl_path = $solution->{options}->{perl} . '\lib\CORE\perl*.lib';
my @perl_libs = my @perl_libs =
grep { /perl\d+.lib$/ } grep { /perl\d+.lib$/ } glob($perl_path);
glob($perl_path);
if (@perl_libs == 1) if (@perl_libs == 1)
{ {
$plperl->AddLibrary($perl_libs[0]); $plperl->AddLibrary($perl_libs[0]);
} }
else else
{ {
die "could not identify perl library version matching pattern $perl_path\n"; die
"could not identify perl library version matching pattern $perl_path\n";
} }
# Add transform module dependent on plperl # Add transform module dependent on plperl

View File

@ -171,7 +171,8 @@ sub GenerateFiles
print $o "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl}); print $o "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
print $o "#define ENABLE_NLS 1\n" if ($self->{options}->{nls}); print $o "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n"; print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize},
"\n";
print $o "#define RELSEG_SIZE ", print $o "#define RELSEG_SIZE ",
(1024 / $self->{options}->{blocksize}) * (1024 / $self->{options}->{blocksize}) *
$self->{options}->{segsize} * $self->{options}->{segsize} *
@ -281,7 +282,8 @@ sub GenerateFiles
'src/include/utils/fmgrprotos.h', 'src/include/utils/fmgrprotos.h',
'src/backend/utils/fmgrprotos.h')) 'src/backend/utils/fmgrprotos.h'))
{ {
copyFile('src/backend/utils/fmgrprotos.h', copyFile(
'src/backend/utils/fmgrprotos.h',
'src/include/utils/fmgrprotos.h'); 'src/include/utils/fmgrprotos.h');
} }

View File

@ -65,7 +65,8 @@ elsif ($buildwhat)
} }
else else
{ {
system("msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf"); system(
"msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf");
} }
# report status # report status

View File

@ -51,6 +51,7 @@ my $flexflags = ($make =~ /^$basetarg:\s*FLEXFLAGS\s*=\s*(\S.*)/m ? $1 : '');
system("flex $flexflags -o$output $input"); system("flex $flexflags -o$output $input");
if ($? == 0) if ($? == 0)
{ {
# Check for "%option reentrant" in .l file. # Check for "%option reentrant" in .l file.
my $lfile; my $lfile;
open($lfile, '<', $input) || die "opening $input for reading: $!"; open($lfile, '<', $input) || die "opening $input for reading: $!";
@ -58,12 +59,14 @@ if ($? == 0)
close($lfile); close($lfile);
if ($lcode =~ /\%option\sreentrant/) if ($lcode =~ /\%option\sreentrant/)
{ {
# Reentrant scanners usually need a fix to prevent # Reentrant scanners usually need a fix to prevent
# "unused variable" warnings with older flex versions. # "unused variable" warnings with older flex versions.
system("perl src\\tools\\fix-old-flex-code.pl $output"); system("perl src\\tools\\fix-old-flex-code.pl $output");
} }
else else
{ {
# For non-reentrant scanners we need to fix up the yywrap # For non-reentrant scanners we need to fix up the yywrap
# macro definition to keep the MS compiler happy. # macro definition to keep the MS compiler happy.
# For reentrant scanners (like the core scanner) we do not # For reentrant scanners (like the core scanner) we do not

View File

@ -526,6 +526,7 @@ sub fetchRegressOpts
$m =~ s{\\\r?\n}{}g; $m =~ s{\\\r?\n}{}g;
if ($m =~ /^\s*REGRESS_OPTS\s*\+?=(.*)/m) if ($m =~ /^\s*REGRESS_OPTS\s*\+?=(.*)/m)
{ {
# Substitute known Makefile variables, then ignore options that retain # Substitute known Makefile variables, then ignore options that retain
# an unhandled variable reference. Ignore anything that isn't an # an unhandled variable reference. Ignore anything that isn't an
# option starting with "--". # option starting with "--".