Avoid race in RelationBuildDesc() affecting CREATE INDEX CONCURRENTLY.

CIC and REINDEX CONCURRENTLY assume backends see their catalog changes
no later than each backend's next transaction start.  That failed to
hold when a backend absorbed a relevant invalidation in the middle of
running RelationBuildDesc() on the CIC index.  Queries that use the
resulting index can silently fail to find rows.  Fix this for future
index builds by making RelationBuildDesc() loop until it finishes
without accepting a relevant invalidation.  It may be necessary to
reindex to recover from past occurrences; REINDEX CONCURRENTLY suffices.
Back-patch to 9.6 (all supported versions).

Noah Misch and Andrey Borodin, reviewed (in earlier versions) by Andres
Freund.

Discussion: https://postgr.es/m/20210730022548.GA1940096@gust.leadboat.com
Noah Misch 2021-10-23 18:36:38 -07:00
parent 1e9475694b
commit fdd965d074
8 changed files with 368 additions and 93 deletions
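Condensed, the mechanism the relcache.c diff below implements looks like this. The in_progress_list bookkeeping mirrors the patch, while build_from_catalogs() and destroy_partial_entry() are hypothetical stand-ins for the catalog-scanning body of RelationBuildDesc() and for RelationDestroyRelation(); a minimal sketch, not the actual PostgreSQL code:

    #include <stdbool.h>

    typedef unsigned int Oid;
    typedef struct RelationData *Relation;

    typedef struct InProgressEnt
    {
        Oid     reloid;         /* OID of relation being built */
        bool    invalidated;    /* set if an invalidation arrives mid-build */
    } InProgressEnt;

    /* The real patch grows this array dynamically with repalloc(). */
    static InProgressEnt in_progress_list[64];
    static int  in_progress_list_len;

    /* Hypothetical stand-ins for catalog reads and relcache teardown. */
    extern Relation build_from_catalogs(Oid relid);
    extern void destroy_partial_entry(Relation rel);

    static Relation
    build_desc_with_retry(Oid targetRelId)
    {
        int         off = in_progress_list_len++;  /* push our stack entry */
        Relation    rel;

        in_progress_list[off].reloid = targetRelId;
    retry:
        in_progress_list[off].invalidated = false;

        /*
         * Catalog reads here may absorb invalidation messages; invalidation
         * processing sets in_progress_list[off].invalidated if a message
         * relevant to targetRelId arrives while we are still building.
         */
        rel = build_from_catalogs(targetRelId);

        if (in_progress_list[off].invalidated)
        {
            destroy_partial_entry(rel);
            goto retry;             /* rebuild from current catalog contents */
        }

        in_progress_list_len--;     /* pop our stack entry */
        return rel;
    }

Without the loop, a build that started before a CIC invalidation but finished after it could cache a descriptor that omits the new index, so the backend's next transaction would plan around it; that is the silent-wrong-results window the commit message describes.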

contrib/amcheck/t/002_cic.pl

@@ -0,0 +1,78 @@
# Copyright (c) 2021, PostgreSQL Global Development Group

# Test CREATE INDEX CONCURRENTLY with concurrent modifications
use strict;
use warnings;

use Config;
use PostgresNode;
use TestLib;

use Test::More tests => 4;

my ($node, $result);

#
# Test set-up
#
$node = PostgresNode->new('CIC_test');
$node->init;
$node->append_conf('postgresql.conf', 'lock_timeout = 180000');
$node->start;
$node->safe_psql('postgres', q(CREATE EXTENSION amcheck));
$node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
$node->safe_psql('postgres', q(CREATE INDEX idx ON tbl(i)));

#
# Stress CIC with pgbench
#

# Run background pgbench with CIC. We cannot mix-in this script into single
# pgbench: CIC will deadlock with itself occasionally.
my $pgbench_out   = '';
my $pgbench_timer = IPC::Run::timeout(180);
my $pgbench_h     = $node->background_pgbench(
	'--no-vacuum --client=1 --transactions=200',
	{
		'002_pgbench_concurrent_cic' => q(
			DROP INDEX CONCURRENTLY idx;
			CREATE INDEX CONCURRENTLY idx ON tbl(i);
			SELECT bt_index_check('idx',true);
		)
	},
	\$pgbench_out,
	$pgbench_timer);

# Run pgbench.
$node->pgbench(
	'--no-vacuum --client=5 --transactions=200',
	0,
	[qr{actually processed}],
	[qr{^$}],
	'concurrent INSERTs',
	{
		'002_pgbench_concurrent_transaction' => q(
			BEGIN;
			INSERT INTO tbl VALUES(0);
			COMMIT;
		),
		'002_pgbench_concurrent_transaction_savepoints' => q(
			BEGIN;
			SAVEPOINT s1;
			INSERT INTO tbl VALUES(0);
			COMMIT;
		)
	});

$pgbench_h->pump_nb;
$pgbench_h->finish();
$result =
    ($Config{osname} eq "MSWin32")
  ? ($pgbench_h->full_results)[0]
  : $pgbench_h->result(0);
is($result, 0, "pgbench with CIC works");

# done
$node->stop;
done_testing();

src/backend/utils/cache/inval.c

@@ -628,7 +628,7 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
 			int			i;

 			if (msg->rc.relId == InvalidOid)
-				RelationCacheInvalidate();
+				RelationCacheInvalidate(false);
 			else
 				RelationCacheInvalidateEntry(msg->rc.relId);

@@ -685,12 +685,18 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
  */
 void
 InvalidateSystemCaches(void)
+{
+	InvalidateSystemCachesExtended(false);
+}
+
+void
+InvalidateSystemCachesExtended(bool debug_discard)
 {
 	int			i;

 	InvalidateCatalogSnapshot();
 	ResetCatalogCaches();
-	RelationCacheInvalidate();	/* gets smgr and relmap too */
+	RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */

 	for (i = 0; i < syscache_callback_count; i++)
 	{
@@ -759,7 +765,7 @@ AcceptInvalidationMessages(void)
 		if (recursion_depth < debug_discard_caches)
 		{
 			recursion_depth++;
-			InvalidateSystemCaches();
+			InvalidateSystemCachesExtended(true);
 			recursion_depth--;
 		}
 	}
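The true argument in that last hunk is what keeps debug_discard_caches from livelocking relcache rebuilds. Sketched as a call chain, in comment form (an illustration based on this patch's comments, not code from it):

    /*
     * Call cycle that debug_discard = true breaks:
     *
     *   RelationBuildDesc()
     *     -> catalog scan -> AcceptInvalidationMessages()
     *       -> InvalidateSystemCachesExtended(true)   [debug_discard_caches]
     *         -> RelationCacheInvalidate(true)
     *            -- with "true", in_progress_list entries are NOT flagged
     *
     * If RelationCacheInvalidate(false) ran here instead, each retry's own
     * catalog scans would re-flag the in-progress build, and the
     * RelationBuildDesc() retry loop could never terminate under
     * debug_discard_caches.
     */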

src/backend/utils/cache/relcache.c

@@ -150,6 +150,24 @@ bool		criticalSharedRelcachesBuilt = false;
  */
 static long relcacheInvalsReceived = 0L;

+/*
+ * in_progress_list is a stack of ongoing RelationBuildDesc() calls.  CREATE
+ * INDEX CONCURRENTLY makes catalog changes under ShareUpdateExclusiveLock.
+ * It critically relies on each backend absorbing those changes no later than
+ * next transaction start.  Hence, RelationBuildDesc() loops until it finishes
+ * without accepting a relevant invalidation.  (Most invalidation consumers
+ * don't do this.)
+ */
+typedef struct inprogressent
+{
+	Oid			reloid;			/* OID of relation being built */
+	bool		invalidated;	/* whether an invalidation arrived for it */
+} InProgressEnt;
+
+static InProgressEnt *in_progress_list;
+static int	in_progress_list_len;
+static int	in_progress_list_maxlen;
+
 /*
  * eoxact_list[] stores the OIDs of relations that (might) need AtEOXact
  * cleanup work.  This list intentionally has limited size; if it overflows,
@@ -1000,6 +1018,7 @@ equalRSDesc(RowSecurityDesc *rsdesc1, RowSecurityDesc *rsdesc2)
 static Relation
 RelationBuildDesc(Oid targetRelId, bool insertIt)
 {
+	int			in_progress_offset;
 	Relation	relation;
 	Oid			relid;
 	HeapTuple	pg_class_tuple;
@@ -1033,6 +1052,21 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 	}
 #endif

+	/* Register to catch invalidation messages */
+	if (in_progress_list_len >= in_progress_list_maxlen)
+	{
+		int			allocsize;
+
+		allocsize = in_progress_list_maxlen * 2;
+		in_progress_list = repalloc(in_progress_list,
+									allocsize * sizeof(*in_progress_list));
+		in_progress_list_maxlen = allocsize;
+	}
+	in_progress_offset = in_progress_list_len++;
+	in_progress_list[in_progress_offset].reloid = targetRelId;
+retry:
+	in_progress_list[in_progress_offset].invalidated = false;
+
 	/*
 	 * find the tuple in pg_class corresponding to the given relation id
 	 */
@@ -1051,6 +1085,8 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 			MemoryContextDelete(tmpcxt);
 		}
 #endif
+		Assert(in_progress_offset + 1 == in_progress_list_len);
+		in_progress_list_len--;
 		return NULL;
 	}

@@ -1213,6 +1249,21 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 	 */
 	heap_freetuple(pg_class_tuple);

+	/*
+	 * If an invalidation arrived mid-build, start over.  Between here and the
+	 * end of this function, don't add code that does or reasonably could read
+	 * system catalogs.  That range must be free from invalidation processing
+	 * for the !insertIt case.  For the insertIt case, RelationCacheInsert()
+	 * will enroll this relation in ordinary relcache invalidation processing,
+	 */
+	if (in_progress_list[in_progress_offset].invalidated)
+	{
+		RelationDestroyRelation(relation, false);
+		goto retry;
+	}
+	Assert(in_progress_offset + 1 == in_progress_list_len);
+	in_progress_list_len--;
+
 	/*
 	 * Insert newly created relation into relcache hash table, if requested.
 	 *
@@ -2566,6 +2617,14 @@ RelationClearRelation(Relation relation, bool rebuild)
 		/* Build temporary entry, but don't link it into hashtable */
 		newrel = RelationBuildDesc(save_relid, false);
+
+		/*
+		 * Between here and the end of the swap, don't add code that does or
+		 * reasonably could read system catalogs.  That range must be free
+		 * from invalidation processing.  See RelationBuildDesc() manipulation
+		 * of in_progress_list.
+		 */
+
 		if (newrel == NULL)
 		{
@@ -2805,6 +2864,14 @@ RelationCacheInvalidateEntry(Oid relationId)
 		relcacheInvalsReceived++;
 		RelationFlushRelation(relation);
 	}
+	else
+	{
+		int			i;
+
+		for (i = 0; i < in_progress_list_len; i++)
+			if (in_progress_list[i].reloid == relationId)
+				in_progress_list[i].invalidated = true;
+	}
 }

 /*
@@ -2813,11 +2880,11 @@ RelationCacheInvalidateEntry(Oid relationId)
  *	 and rebuild those with positive reference counts.  Also reset the smgr
  *	 relation cache and re-read relation mapping data.
  *
- *	 This is currently used only to recover from SI message buffer overflow,
- *	 so we do not touch relations having new-in-transaction relfilenodes; they
- *	 cannot be targets of cross-backend SI updates (and our own updates now go
- *	 through a separate linked list that isn't limited by the SI message
- *	 buffer size).
+ *	 Apart from debug_discard_caches, this is currently used only to recover
+ *	 from SI message buffer overflow, so we do not touch relations having
+ *	 new-in-transaction relfilenodes; they cannot be targets of cross-backend
+ *	 SI updates (and our own updates now go through a separate linked list
+ *	 that isn't limited by the SI message buffer size).
  *
  *	 We do this in two phases: the first pass deletes deletable items, and
  *	 the second one rebuilds the rebuildable items.  This is essential for
@@ -2835,9 +2902,14 @@ RelationCacheInvalidateEntry(Oid relationId)
  *	 second pass processes nailed-in-cache items before other nondeletable
  *	 items.  This should ensure that system catalogs are up to date before
  *	 we attempt to use them to reload information about other open relations.
+ *
+ *	 After those two phases of work having immediate effects, we normally
+ *	 signal any RelationBuildDesc() on the stack to start over.  However, we
+ *	 don't do this if called as part of debug_discard_caches.  Otherwise,
+ *	 RelationBuildDesc() would become an infinite loop.
  */
 void
-RelationCacheInvalidate(void)
+RelationCacheInvalidate(bool debug_discard)
 {
 	HASH_SEQ_STATUS status;
 	RelIdCacheEnt *idhentry;
@@ -2845,6 +2917,7 @@ RelationCacheInvalidate(void)
 	List	   *rebuildFirstList = NIL;
 	List	   *rebuildList = NIL;
 	ListCell   *l;
+	int			i;

 	/*
 	 * Reload relation mapping data before starting to reconstruct cache.
@@ -2931,6 +3004,11 @@ RelationCacheInvalidate(void)
 		RelationClearRelation(relation, true);
 	}
 	list_free(rebuildList);
+
+	if (!debug_discard)
+		/* Any RelationBuildDesc() on the stack must start over. */
+		for (i = 0; i < in_progress_list_len; i++)
+			in_progress_list[i].invalidated = true;
 }

 /*
@@ -3081,6 +3159,13 @@ AtEOXact_RelationCache(bool isCommit)
 	RelIdCacheEnt *idhentry;
 	int			i;

+	/*
+	 * Forget in_progress_list.  This is relevant when we're aborting due to
+	 * an error during RelationBuildDesc().
+	 */
+	Assert(in_progress_list_len == 0 || !isCommit);
+	in_progress_list_len = 0;
+
 	/*
 	 * Unless the eoxact_list[] overflowed, we only need to examine the rels
 	 * listed in it.  Otherwise fall back on a hash_seq_search scan.
@@ -3227,6 +3312,14 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	RelIdCacheEnt *idhentry;
 	int			i;

+	/*
+	 * Forget in_progress_list.  This is relevant when we're aborting due to
+	 * an error during RelationBuildDesc().  We don't commit subtransactions
+	 * during RelationBuildDesc().
+	 */
+	Assert(in_progress_list_len == 0 || !isCommit);
+	in_progress_list_len = 0;
+
 	/*
 	 * Unless the eoxact_list[] overflowed, we only need to examine the rels
 	 * listed in it.  Otherwise fall back on a hash_seq_search scan.  Same
@@ -3775,6 +3868,7 @@ void
 RelationCacheInitialize(void)
 {
 	HASHCTL		ctl;
+	int			allocsize;

 	/*
 	 * make sure cache memory context exists
@@ -3790,6 +3884,15 @@ RelationCacheInitialize(void)
 	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
 								  &ctl, HASH_ELEM | HASH_BLOBS);

+	/*
+	 * reserve enough in_progress_list slots for many cases
+	 */
+	allocsize = 4;
+	in_progress_list =
+		MemoryContextAlloc(CacheMemoryContext,
+						   allocsize * sizeof(*in_progress_list));
+	in_progress_list_maxlen = allocsize;
+
 	/*
 	 * relation mapper needs to be initialized too
 	 */

src/bin/pgbench/t/001_pgbench_with_server.pl

@@ -14,54 +14,6 @@ my $node = PostgresNode->new('main');
 $node->init;
 $node->start;

-# invoke pgbench, with parameters:
-#   $opts: options as a string to be split on spaces
-#   $stat: expected exit status
-#   $out: reference to a regexp list that must match stdout
-#   $err: reference to a regexp list that must match stderr
-#   $name: name of test for error messages
-#   $files: reference to filename/contents dictionary
-#   @args: further raw options or arguments
-sub pgbench
-{
-	local $Test::Builder::Level = $Test::Builder::Level + 1;
-
-	my ($opts, $stat, $out, $err, $name, $files, @args) = @_;
-	my @cmd = ('pgbench', split /\s+/, $opts);
-	my @filenames = ();
-	if (defined $files)
-	{
-
-		# note: files are ordered for determinism
-		for my $fn (sort keys %$files)
-		{
-			my $filename = $node->basedir . '/' . $fn;
-			push @cmd, '-f', $filename;
-
-			# cleanup file weight
-			$filename =~ s/\@\d+$//;
-
-			#push @filenames, $filename;
-			# filenames are expected to be unique on a test
-			if (-e $filename)
-			{
-				ok(0, "$filename must not already exist");
-				unlink $filename or die "cannot unlink $filename: $!";
-			}
-			append_to_file($filename, $$files{$fn});
-		}
-	}
-
-	push @cmd, @args;
-
-	$node->command_checks_all(\@cmd, $stat, $out, $err, $name);
-
-	# cleanup?
-	#unlink @filenames or die "cannot unlink files (@filenames): $!";
-
-	return;
-}
-
 # tablespace for testing, because partitioned tables cannot use pg_default
 # explicitly and we want to test that table creation with tablespace works
 # for partitioned tables.
@@ -77,7 +29,7 @@ $node->safe_psql('postgres',
 # Test concurrent OID generation via pg_enum_oid_index.  This indirectly
 # exercises LWLock and spinlock concurrency.
 my $labels = join ',', map { "'l$_'" } 1 .. 1000;
-pgbench(
+$node->pgbench(
 	'--no-vacuum --client=5 --protocol=prepared --transactions=25',
 	0,
 	[qr{processed: 125/125}],
@@ -89,7 +41,7 @@ pgbench(
 	});

 # Trigger various connection errors
-pgbench(
+$node->pgbench(
 	'no-such-database',
 	1,
 	[qr{^$}],
@@ -99,13 +51,13 @@ pgbench(
 	],
 	'no such database');

-pgbench(
+$node->pgbench(
 	'-S -t 1', 1, [],
 	[qr{Perhaps you need to do initialization}],
 	'run without init');

 # Initialize pgbench tables scale 1
-pgbench(
+$node->pgbench(
 	'-i', 0,
 	[qr{^$}],
 	[
@@ -117,7 +69,7 @@ pgbench(
 	'pgbench scale 1 initialization',);

 # Again, with all possible options
-pgbench(
+$node->pgbench(
 	'--initialize --init-steps=dtpvg --scale=1 --unlogged-tables --fillfactor=98 --foreign-keys --quiet --tablespace=regress_pgbench_tap_1_ts --index-tablespace=regress_pgbench_tap_1_ts --partitions=2 --partition-method=hash',
 	0,
 	[qr{^$}i],
@@ -134,7 +86,7 @@ pgbench(
 	'pgbench scale 1 initialization');

 # Test interaction of --init-steps with legacy step-selection options
-pgbench(
+$node->pgbench(
 	'--initialize --init-steps=dtpvGvv --no-vacuum --foreign-keys --unlogged-tables --partitions=3',
 	0,
 	[qr{^$}],
@@ -151,7 +103,7 @@ pgbench(
 	'pgbench --init-steps');

 # Run all builtin scripts, for a few transactions each
-pgbench(
+$node->pgbench(
 	'--transactions=5 -Dfoo=bla --client=2 --protocol=simple --builtin=t'
 	  . ' --connect -n -v -n',
 	0,
@@ -164,7 +116,7 @@ pgbench(
 	[qr{^$}],
 	'pgbench tpcb-like');

-pgbench(
+$node->pgbench(
 	'--transactions=20 --client=5 -M extended --builtin=si -C --no-vacuum -s 1',
 	0,
 	[
@@ -177,7 +129,7 @@ pgbench(
 	[qr{scale option ignored}],
 	'pgbench simple update');

-pgbench(
+$node->pgbench(
 	'-t 100 -c 7 -M prepared -b se --debug',
 	0,
 	[
@@ -203,7 +155,7 @@ my $nthreads = 2;
 }

 # run custom scripts
-pgbench(
+$node->pgbench(
 	"-t 100 -c 1 -j $nthreads -M prepared -n",
 	0,
 	[
@@ -233,7 +185,7 @@ COMMIT;
 	}
 	});

-pgbench(
+$node->pgbench(
 	'-n -t 10 -c 1 -M simple',
 	0,
 	[
@@ -254,7 +206,7 @@ COMMIT;
 	}
 	});

-pgbench(
+$node->pgbench(
 	'-n -t 10 -c 2 -M extended',
 	0,
 	[
@@ -285,7 +237,7 @@ $node->append_conf('postgresql.conf',
 	  . "log_parameter_max_length = 0\n"
 	  . "log_parameter_max_length_on_error = 0");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -312,7 +264,7 @@ $node->append_conf('postgresql.conf',
 	"log_parameter_max_length = -1\n"
 	  . "log_parameter_max_length_on_error = 64");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -326,7 +278,7 @@ pgbench(
 SELECT 1 / (random() / 2)::int, :one::int, :two::int;
 }
 	});

-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -354,7 +306,7 @@ $node->append_conf('postgresql.conf',
 	  . "log_parameter_max_length = 7\n"
 	  . "log_parameter_max_length_on_error = -1");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -371,7 +323,7 @@ SELECT 1 / (random() / 2)::int, :one::int, :two::int;
 $node->append_conf('postgresql.conf', "log_min_duration_statement = 0");
 $node->reload;

-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -394,7 +346,7 @@ like(
 $log = undef;

 # Check that bad parameters are reported during typinput phase of BIND
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -418,7 +370,7 @@ $node->reload;

 # test expressions
 # command 1..3 and 23 depend on random seed which is used to call srandom.
-pgbench(
+$node->pgbench(
 	'--random-seed=5432 -t 1 -Dfoo=-10.1 -Dbla=false -Di=+3 -Dn=null -Dt=t -Df=of -Dd=1.0',
 	0,
 	[ qr{type: .*/001_pgbench_expressions}, qr{processed: 1/1} ],
@@ -653,7 +605,7 @@ $node->safe_psql('postgres',
 my $seed = int(rand(1000000000));
 for my $i (1, 2)
 {
-	pgbench(
+	$node->pgbench(
 		"--random-seed=$seed -t 1",
 		0,
 		[qr{processed: 1/1}],
@@ -693,7 +645,7 @@ ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/,
 $node->safe_psql('postgres', 'DROP TABLE seeded_random;');

 # backslash commands
-pgbench(
+$node->pgbench(
 	'-t 1', 0,
 	[
 		qr{type: .*/001_pgbench_backslash_commands},
@@ -722,7 +674,7 @@ pgbench(
 	});

 # working \gset
-pgbench(
+$node->pgbench(
 	'-t 1', 0,
 	[ qr{type: .*/001_pgbench_gset}, qr{processed: 1/1} ],
 	[
@@ -757,7 +709,7 @@ SELECT 0 AS i4, 4 AS i4 \gset
 	});

 # \gset cannot accept more than one row, causing command to fail.
-pgbench(
+$node->pgbench(
 	'-t 1', 2,
 	[ qr{type: .*/001_pgbench_gset_two_rows}, qr{processed: 0/1} ],
 	[qr{expected one row, got 2\b}],
@@ -770,7 +722,7 @@ SELECT 5432 AS fail UNION SELECT 5433 ORDER BY 1 \gset

 # working \aset
 # Valid cases.
-pgbench(
+$node->pgbench(
 	'-t 1', 0,
 	[ qr{type: .*/001_pgbench_aset}, qr{processed: 1/1} ],
 	[ qr{command=3.: int 8\b}, qr{command=4.: int 7\b} ],
@@ -786,7 +738,7 @@ SELECT 8 AS i6 UNION SELECT 9 ORDER BY 1 DESC \aset
 	});

 # Empty result set with \aset, causing command to fail.
-pgbench(
+$node->pgbench(
 	'-t 1', 2,
 	[ qr{type: .*/001_pgbench_aset_empty}, qr{processed: 0/1} ],
 	[
@@ -803,7 +755,7 @@ pgbench(
 	});

 # Working \startpipeline
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	0,
 	[ qr{type: .*/001_pgbench_pipeline}, qr{actually processed: 1/1} ],
@@ -819,7 +771,7 @@ pgbench(
 	});

 # Working \startpipeline in prepared query mode
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M prepared',
 	0,
 	[ qr{type: .*/001_pgbench_pipeline_prep}, qr{actually processed: 1/1} ],
@@ -835,7 +787,7 @@ pgbench(
 	});

 # Try \startpipeline twice
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	2,
 	[],
@@ -850,7 +802,7 @@ pgbench(
 	});

 # Try to end a pipeline that hasn't started
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	2,
 	[],
@@ -864,7 +816,7 @@ pgbench(
 	});

 # Try \gset in pipeline mode
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	2,
 	[],
@@ -1129,7 +1081,7 @@ for my $e (@errors)
 	$status != 0 or die "invalid expected status for test \"$name\"";
 	my $n = '001_pgbench_error_' . $name;
 	$n =~ s/ /_/g;
-	pgbench(
+	$node->pgbench(
 		'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX'
 		  . ' -Dmaxint=9223372036854775807 -Dminint=-9223372036854775808'
 		  . ($no_prepare ? '' : ' -M prepared'),
@@ -1141,14 +1093,14 @@ for my $e (@errors)
 }

 # throttling
-pgbench(
+$node->pgbench(
 	'-t 100 -S --rate=100000 --latency-limit=1000000 -c 2 -n -r',
 	0,
 	[ qr{processed: 200/200}, qr{builtin: select only} ],
 	[qr{^$}],
 	'pgbench throttling');

-pgbench(
+$node->pgbench(

 	# given the expected rate and the 2 ms tx duration, at most one is executed
 	'-t 10 --rate=100000 --latency-limit=1 -n -r',
@@ -1220,7 +1172,7 @@ sub check_pgbench_logs
 my $bdir = $node->basedir;

 # Run with sampling rate, 2 clients with 50 transactions each.
-pgbench(
+$node->pgbench(
 	"-n -S -t 50 -c 2 --log --sampling-rate=0.5", 0,
 	[ qr{select only}, qr{processed: 100/100} ], [qr{^$}],
 	'pgbench logs', undef,
@@ -1230,7 +1182,7 @@ check_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,
 	qr{^[01] \d{1,2} \d+ \d \d+ \d+$});

 # Run with different read-only option pattern, 1 client with 10 transactions.
-pgbench(
+$node->pgbench(
 	"-n -b select-only -t 10 -l", 0,
 	[ qr{select only}, qr{processed: 10/10} ], [qr{^$}],
 	'pgbench logs contents', undef,

src/include/utils/inval.h

@@ -62,6 +62,7 @@ extern void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
 extern void CallSyscacheCallbacks(int cacheid, uint32 hashvalue);

 extern void InvalidateSystemCaches(void);
+extern void InvalidateSystemCachesExtended(bool debug_discard);

 extern void LogLogicalInvalidations(void);

 #endif							/* INVAL_H */

src/include/utils/relcache.h

@@ -122,7 +122,7 @@ extern void RelationForgetRelation(Oid rid);

 extern void RelationCacheInvalidateEntry(Oid relationId);

-extern void RelationCacheInvalidate(void);
+extern void RelationCacheInvalidate(bool debug_discard);

 extern void RelationCloseSmgrByOid(Oid relationId);

src/test/perl/PostgresNode.pm

@@ -1982,6 +1982,140 @@ sub interactive_psql
 	return $harness;
 }

+# Common sub of pgbench-invoking interfaces.  Makes any requested script files
+# and returns pgbench command-line options causing use of those files.
+sub _pgbench_make_files
+{
+	my ($self, $files) = @_;
+	my @file_opts;
+
+	if (defined $files)
+	{
+
+		# note: files are ordered for determinism
+		for my $fn (sort keys %$files)
+		{
+			my $filename = $self->basedir . '/' . $fn;
+			push @file_opts, '-f', $filename;
+
+			# cleanup file weight
+			$filename =~ s/\@\d+$//;
+
+			#push @filenames, $filename;
+			# filenames are expected to be unique on a test
+			if (-e $filename)
+			{
+				ok(0, "$filename must not already exist");
+				unlink $filename or die "cannot unlink $filename: $!";
+			}
+			TestLib::append_to_file($filename, $$files{$fn});
+		}
+	}
+
+	return @file_opts;
+}
+
+=pod
+
+=item $node->pgbench($opts, $stat, $out, $err, $name, $files, @args)
+
+Invoke B<pgbench>, with parameters and files.
+
+=over
+
+=item $opts
+
+Options as a string to be split on spaces.
+
+=item $stat
+
+Expected exit status.
+
+=item $out
+
+Reference to a regexp list that must match stdout.
+
+=item $err
+
+Reference to a regexp list that must match stderr.
+
+=item $name
+
+Name of test for error messages.
+
+=item $files
+
+Reference to filename/contents dictionary.
+
+=item @args
+
+Further raw options or arguments.
+
+=back
+
+=cut
+
+sub pgbench
+{
+	local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+	my ($self, $opts, $stat, $out, $err, $name, $files, @args) = @_;
+	my @cmd = (
+		'pgbench',
+		split(/\s+/, $opts),
+		$self->_pgbench_make_files($files), @args);
+
+	$self->command_checks_all(\@cmd, $stat, $out, $err, $name);
+}
+
+=pod
+
+=item $node->background_pgbench($opts, $files, \$stdout, $timer) => harness
+
+Invoke B<pgbench> and return an IPC::Run harness object.  The process's stdin
+is empty, and its stdout and stderr go to the $stdout scalar reference.  This
+allows the caller to act on other parts of the system while B<pgbench> is
+running.  Errors from B<pgbench> are the caller's problem.
+
+The specified timer object is attached to the harness, as well.  It's caller's
+responsibility to select the timeout length, and to restart the timer after
+each command if the timeout is per-command.
+
+Be sure to "finish" the harness when done with it.
+
+=over
+
+=item $opts
+
+Options as a string to be split on spaces.
+
+=item $files
+
+Reference to filename/contents dictionary.
+
+=back
+
+=cut
+
+sub background_pgbench
+{
+	my ($self, $opts, $files, $stdout, $timer) = @_;
+
+	my @cmd =
+	  ('pgbench', split(/\s+/, $opts), $self->_pgbench_make_files($files));
+
+	local %ENV = $self->_get_env();
+
+	my $stdin = "";
+	# IPC::Run would otherwise append to existing contents:
+	$$stdout = "" if ref($stdout);
+
+	my $harness = IPC::Run::start \@cmd, '<', \$stdin, '>', $stdout, '2>&1',
+	  $timer;
+
+	return $harness;
+}
+
 =pod

 =item $node->connect_ok($connstr, $test_name, %params)

src/tools/pgindent/typedefs.list

@@ -1093,6 +1093,7 @@ ImportForeignSchemaStmt
 ImportForeignSchemaType
 ImportForeignSchema_function
 ImportQual
+InProgressEnt
 IncludeWal
 InclusionOpaque
 IncrementVarSublevelsUp_context