diff --git a/contrib/amcheck/t/002_cic.pl b/contrib/amcheck/t/002_cic.pl
new file mode 100644
index 0000000000..fd2dbc3366
--- /dev/null
+++ b/contrib/amcheck/t/002_cic.pl
@@ -0,0 +1,78 @@
+
+# Copyright (c) 2021, PostgreSQL Global Development Group
+
+# Test CREATE INDEX CONCURRENTLY with concurrent modifications
+use strict;
+use warnings;
+
+use Config;
+use PostgresNode;
+use TestLib;
+
+use Test::More tests => 4;
+
+my ($node, $result);
+
+#
+# Test set-up
+#
+$node = PostgresNode->new('CIC_test');
+$node->init;
+$node->append_conf('postgresql.conf', 'lock_timeout = 180000');
+$node->start;
+$node->safe_psql('postgres', q(CREATE EXTENSION amcheck));
+$node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
+$node->safe_psql('postgres', q(CREATE INDEX idx ON tbl(i)));
+
+#
+# Stress CIC with pgbench
+#
+
+# Run background pgbench with CIC.  We cannot mix this script into a single
+# pgbench run: CIC will occasionally deadlock with itself.
+my $pgbench_out   = '';
+my $pgbench_timer = IPC::Run::timeout(180);
+my $pgbench_h     = $node->background_pgbench(
+	'--no-vacuum --client=1 --transactions=200',
+	{
+		'002_pgbench_concurrent_cic' => q(
+			DROP INDEX CONCURRENTLY idx;
+			CREATE INDEX CONCURRENTLY idx ON tbl(i);
+			SELECT bt_index_check('idx',true);
+		)
+	},
+	\$pgbench_out,
+	$pgbench_timer);
+
+# Run pgbench.
+$node->pgbench(
+	'--no-vacuum --client=5 --transactions=200',
+	0,
+	[qr{actually processed}],
+	[qr{^$}],
+	'concurrent INSERTs',
+	{
+		'002_pgbench_concurrent_transaction' => q(
+			BEGIN;
+			INSERT INTO tbl VALUES(0);
+			COMMIT;
+		),
+		'002_pgbench_concurrent_transaction_savepoints' => q(
+			BEGIN;
+			SAVEPOINT s1;
+			INSERT INTO tbl VALUES(0);
+			COMMIT;
+		)
+	});
+
+$pgbench_h->pump_nb;
+$pgbench_h->finish();
+$result =
+  ($Config{osname} eq "MSWin32")
+  ? ($pgbench_h->full_results)[0]
+  : $pgbench_h->result(0);
+is($result, 0, "pgbench with CIC works");
+
+# done
+$node->stop;
+done_testing();
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 9352c68090..dd8586ab4d 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -628,7 +628,7 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
 		int			i;
 
 		if (msg->rc.relId == InvalidOid)
-			RelationCacheInvalidate();
+			RelationCacheInvalidate(false);
 		else
 			RelationCacheInvalidateEntry(msg->rc.relId);
 
@@ -685,12 +685,18 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
  */
 void
 InvalidateSystemCaches(void)
+{
+	InvalidateSystemCachesExtended(false);
+}
+
+void
+InvalidateSystemCachesExtended(bool debug_discard)
 {
 	int			i;
 
 	InvalidateCatalogSnapshot();
 	ResetCatalogCaches();
-	RelationCacheInvalidate();	/* gets smgr and relmap too */
+	RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
 
 	for (i = 0; i < syscache_callback_count; i++)
 	{
@@ -759,7 +765,7 @@ AcceptInvalidationMessages(void)
 		if (recursion_depth < debug_discard_caches)
 		{
 			recursion_depth++;
-			InvalidateSystemCaches();
+			InvalidateSystemCachesExtended(true);
 			recursion_depth--;
 		}
 	}
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 13d9994af3..b54c911766 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -150,6 +150,24 @@ bool criticalSharedRelcachesBuilt = false;
  */
 static long relcacheInvalsReceived = 0L;
 
+/*
+ * in_progress_list is a stack of ongoing RelationBuildDesc() calls.  CREATE
+ * INDEX CONCURRENTLY makes catalog changes under ShareUpdateExclusiveLock.
+ * It critically relies on each backend absorbing those changes no later than
+ * next transaction start.  Hence, RelationBuildDesc() loops until it finishes
+ * without accepting a relevant invalidation.  (Most invalidation consumers
+ * don't do this.)
+ */
+typedef struct inprogressent
+{
+	Oid			reloid;			/* OID of relation being built */
+	bool		invalidated;	/* whether an invalidation arrived for it */
+} InProgressEnt;
+
+static InProgressEnt *in_progress_list;
+static int	in_progress_list_len;
+static int	in_progress_list_maxlen;
+
 /*
  * eoxact_list[] stores the OIDs of relations that (might) need AtEOXact
  * cleanup work.  This list intentionally has limited size; if it overflows,
@@ -1000,6 +1018,7 @@ equalRSDesc(RowSecurityDesc *rsdesc1, RowSecurityDesc *rsdesc2)
 static Relation
 RelationBuildDesc(Oid targetRelId, bool insertIt)
 {
+	int			in_progress_offset;
 	Relation	relation;
 	Oid			relid;
 	HeapTuple	pg_class_tuple;
@@ -1033,6 +1052,21 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 	}
 #endif
 
+	/* Register to catch invalidation messages */
+	if (in_progress_list_len >= in_progress_list_maxlen)
+	{
+		int			allocsize;
+
+		allocsize = in_progress_list_maxlen * 2;
+		in_progress_list = repalloc(in_progress_list,
+									allocsize * sizeof(*in_progress_list));
+		in_progress_list_maxlen = allocsize;
+	}
+	in_progress_offset = in_progress_list_len++;
+	in_progress_list[in_progress_offset].reloid = targetRelId;
+retry:
+	in_progress_list[in_progress_offset].invalidated = false;
+
 	/*
 	 * find the tuple in pg_class corresponding to the given relation id
 	 */
@@ -1051,6 +1085,8 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 			MemoryContextDelete(tmpcxt);
 		}
 #endif
+		Assert(in_progress_offset + 1 == in_progress_list_len);
+		in_progress_list_len--;
 		return NULL;
 	}
 
@@ -1213,6 +1249,21 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
 	 */
 	heap_freetuple(pg_class_tuple);
 
+	/*
+	 * If an invalidation arrived mid-build, start over.  Between here and the
+	 * end of this function, don't add code that does or reasonably could read
+	 * system catalogs.  That range must be free from invalidation processing
+	 * for the !insertIt case.  For the insertIt case, RelationCacheInsert()
+	 * will enroll this relation in ordinary relcache invalidation processing.
+	 */
+	if (in_progress_list[in_progress_offset].invalidated)
+	{
+		RelationDestroyRelation(relation, false);
+		goto retry;
+	}
+	Assert(in_progress_offset + 1 == in_progress_list_len);
+	in_progress_list_len--;
+
 	/*
 	 * Insert newly created relation into relcache hash table, if requested.
 	 *
@@ -2566,6 +2617,14 @@ RelationClearRelation(Relation relation, bool rebuild)
 
 		/* Build temporary entry, but don't link it into hashtable */
 		newrel = RelationBuildDesc(save_relid, false);
+
+		/*
+		 * Between here and the end of the swap, don't add code that does or
+		 * reasonably could read system catalogs.  That range must be free
+		 * from invalidation processing.  See RelationBuildDesc() manipulation
+		 * of in_progress_list.
+		 */
+
 		if (newrel == NULL)
 		{
 			/*
@@ -2805,6 +2864,14 @@ RelationCacheInvalidateEntry(Oid relationId)
 		relcacheInvalsReceived++;
 		RelationFlushRelation(relation);
 	}
+	else
+	{
+		int			i;
+
+		for (i = 0; i < in_progress_list_len; i++)
+			if (in_progress_list[i].reloid == relationId)
+				in_progress_list[i].invalidated = true;
+	}
 }
 
 /*
@@ -2813,11 +2880,11 @@ RelationCacheInvalidateEntry(Oid relationId)
  * and rebuild those with positive reference counts.  Also reset the smgr
  * relation cache and re-read relation mapping data.
 *
- * This is currently used only to recover from SI message buffer overflow,
- * so we do not touch relations having new-in-transaction relfilenodes; they
- * cannot be targets of cross-backend SI updates (and our own updates now go
- * through a separate linked list that isn't limited by the SI message
- * buffer size).
+ * Apart from debug_discard_caches, this is currently used only to recover
+ * from SI message buffer overflow, so we do not touch relations having
+ * new-in-transaction relfilenodes; they cannot be targets of cross-backend
+ * SI updates (and our own updates now go through a separate linked list
+ * that isn't limited by the SI message buffer size).
 *
 * We do this in two phases: the first pass deletes deletable items, and
 * the second one rebuilds the rebuildable items.  This is essential for
@@ -2835,9 +2902,14 @@ RelationCacheInvalidateEntry(Oid relationId)
 * second pass processes nailed-in-cache items before other nondeletable
 * items.  This should ensure that system catalogs are up to date before
 * we attempt to use them to reload information about other open relations.
+ *
+ * After those two phases, which take effect immediately, we normally
+ * signal any RelationBuildDesc() on the stack to start over.  However, we
+ * don't do this if called as part of debug_discard_caches.  Otherwise,
+ * RelationBuildDesc() would become an infinite loop.
 */
 void
-RelationCacheInvalidate(void)
+RelationCacheInvalidate(bool debug_discard)
 {
 	HASH_SEQ_STATUS status;
 	RelIdCacheEnt *idhentry;
@@ -2845,6 +2917,7 @@ RelationCacheInvalidateEntry(Oid relationId)
 	List	   *rebuildFirstList = NIL;
 	List	   *rebuildList = NIL;
 	ListCell   *l;
+	int			i;
 
 	/*
 	 * Reload relation mapping data before starting to reconstruct cache.
@@ -2931,6 +3004,11 @@ RelationCacheInvalidate(void)
 		RelationClearRelation(relation, true);
 	}
 	list_free(rebuildList);
+
+	if (!debug_discard)
+		/* Any RelationBuildDesc() on the stack must start over. */
+		for (i = 0; i < in_progress_list_len; i++)
+			in_progress_list[i].invalidated = true;
 }
 
 /*
@@ -3081,6 +3159,13 @@ AtEOXact_RelationCache(bool isCommit)
 	RelIdCacheEnt *idhentry;
 	int			i;
 
+	/*
+	 * Forget in_progress_list.  This is relevant when we're aborting due to
+	 * an error during RelationBuildDesc().
+	 */
+	Assert(in_progress_list_len == 0 || !isCommit);
+	in_progress_list_len = 0;
+
 	/*
 	 * Unless the eoxact_list[] overflowed, we only need to examine the rels
 	 * listed in it.  Otherwise fall back on a hash_seq_search scan.
@@ -3227,6 +3312,14 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	RelIdCacheEnt *idhentry;
 	int			i;
 
+	/*
+	 * Forget in_progress_list.  This is relevant when we're aborting due to
+	 * an error during RelationBuildDesc().  We don't commit subtransactions
+	 * during RelationBuildDesc().
+	 */
+	Assert(in_progress_list_len == 0 || !isCommit);
+	in_progress_list_len = 0;
+
 	/*
 	 * Unless the eoxact_list[] overflowed, we only need to examine the rels
 	 * listed in it.  Otherwise fall back on a hash_seq_search scan.  Same
@@ -3775,6 +3868,7 @@ void
 RelationCacheInitialize(void)
 {
 	HASHCTL		ctl;
+	int			allocsize;
 
 	/*
 	 * make sure cache memory context exists
@@ -3790,6 +3884,15 @@ RelationCacheInitialize(void)
 	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
 								  &ctl, HASH_ELEM | HASH_BLOBS);
 
+	/*
+	 * reserve enough in_progress_list slots for many cases
+	 */
+	allocsize = 4;
+	in_progress_list =
+		MemoryContextAlloc(CacheMemoryContext,
+						   allocsize * sizeof(*in_progress_list));
+	in_progress_list_maxlen = allocsize;
+
 	/*
 	 * relation mapper needs to be initialized too
 	 */
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index ef53f6b2d9..7ca96e58ca 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -14,54 +14,6 @@ my $node = PostgresNode->new('main');
 $node->init;
 $node->start;
 
-# invoke pgbench, with parameters:
-#   $opts: options as a string to be split on spaces
-#   $stat: expected exit status
-#   $out: reference to a regexp list that must match stdout
-#   $err: reference to a regexp list that must match stderr
-#   $name: name of test for error messages
-#   $files: reference to filename/contents dictionary
-#   @args: further raw options or arguments
-sub pgbench
-{
-	local $Test::Builder::Level = $Test::Builder::Level + 1;
-
-	my ($opts, $stat, $out, $err, $name, $files, @args) = @_;
-	my @cmd = ('pgbench', split /\s+/, $opts);
-	my @filenames = ();
-	if (defined $files)
-	{
-
-		# note: files are ordered for determinism
-		for my $fn (sort keys %$files)
-		{
-			my $filename = $node->basedir . '/' . $fn;
-			push @cmd, '-f', $filename;
-
-			# cleanup file weight
-			$filename =~ s/\@\d+$//;
-
-			#push @filenames, $filename;
-			# filenames are expected to be unique on a test
-			if (-e $filename)
-			{
-				ok(0, "$filename must not already exist");
-				unlink $filename or die "cannot unlink $filename: $!";
-			}
-			append_to_file($filename, $$files{$fn});
-		}
-	}
-
-	push @cmd, @args;
-
-	$node->command_checks_all(\@cmd, $stat, $out, $err, $name);
-
-	# cleanup?
-	#unlink @filenames or die "cannot unlink files (@filenames): $!";
-
-	return;
-}
-
 # tablespace for testing, because partitioned tables cannot use pg_default
 # explicitly and we want to test that table creation with tablespace works
 # for partitioned tables.
@@ -77,7 +29,7 @@ $node->safe_psql('postgres',
 # Test concurrent OID generation via pg_enum_oid_index.  This indirectly
 # exercises LWLock and spinlock concurrency.
 my $labels = join ',', map { "'l$_'" } 1 .. 1000;
-pgbench(
+$node->pgbench(
 	'--no-vacuum --client=5 --protocol=prepared --transactions=25',
 	0,
 	[qr{processed: 125/125}],
@@ -89,7 +41,7 @@ pgbench(
 
 # Trigger various connection errors
-pgbench(
+$node->pgbench(
 	'no-such-database',
 	1,
 	[qr{^$}],
 	[
@@ -99,13 +51,13 @@ pgbench(
 	],
 	'no such database');
 
-pgbench(
+$node->pgbench(
 	'-S -t 1', 1, [],
 	[qr{Perhaps you need to do initialization}],
 	'run without init');
 
 # Initialize pgbench tables scale 1
-pgbench(
+$node->pgbench(
 	'-i', 0, [qr{^$}],
 	[
@@ -117,7 +69,7 @@ pgbench(
 	'pgbench scale 1 initialization',);
 
 # Again, with all possible options
-pgbench(
+$node->pgbench(
 	'--initialize --init-steps=dtpvg --scale=1 --unlogged-tables --fillfactor=98 --foreign-keys --quiet --tablespace=regress_pgbench_tap_1_ts --index-tablespace=regress_pgbench_tap_1_ts --partitions=2 --partition-method=hash',
 	0,
 	[qr{^$}i],
@@ -134,7 +86,7 @@ pgbench(
 	'pgbench scale 1 initialization');
 
 # Test interaction of --init-steps with legacy step-selection options
-pgbench(
+$node->pgbench(
 	'--initialize --init-steps=dtpvGvv --no-vacuum --foreign-keys --unlogged-tables --partitions=3',
 	0,
 	[qr{^$}],
@@ -151,7 +103,7 @@ pgbench(
 	'pgbench --init-steps');
 
 # Run all builtin scripts, for a few transactions each
-pgbench(
+$node->pgbench(
 	'--transactions=5 -Dfoo=bla --client=2 --protocol=simple --builtin=t'
 	  . ' --connect -n -v -n',
 	0,
@@ -164,7 +116,7 @@ pgbench(
 	[qr{^$}],
 	'pgbench tpcb-like');
 
-pgbench(
+$node->pgbench(
 	'--transactions=20 --client=5 -M extended --builtin=si -C --no-vacuum -s 1',
 	0,
 	[
@@ -177,7 +129,7 @@ pgbench(
 	[qr{scale option ignored}],
 	'pgbench simple update');
 
-pgbench(
+$node->pgbench(
 	'-t 100 -c 7 -M prepared -b se --debug',
 	0,
 	[
@@ -203,7 +155,7 @@ my $nthreads = 2;
 }
 
 # run custom scripts
-pgbench(
+$node->pgbench(
 	"-t 100 -c 1 -j $nthreads -M prepared -n",
 	0,
 	[
@@ -233,7 +185,7 @@ COMMIT;
 }
 	});
 
-pgbench(
+$node->pgbench(
 	'-n -t 10 -c 1 -M simple',
 	0,
 	[
@@ -254,7 +206,7 @@ COMMIT;
 }
 	});
 
-pgbench(
+$node->pgbench(
 	'-n -t 10 -c 2 -M extended',
 	0,
 	[
@@ -285,7 +237,7 @@ $node->append_conf('postgresql.conf',
 	  . "log_parameter_max_length = 0\n"
 	  . "log_parameter_max_length_on_error = 0");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -312,7 +264,7 @@ $node->append_conf('postgresql.conf',
 	"log_parameter_max_length = -1\n"
 	  . "log_parameter_max_length_on_error = 64");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -326,7 +278,7 @@ pgbench(
 SELECT 1 / (random() / 2)::int, :one::int, :two::int;
 }
 	});
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -354,7 +306,7 @@ $node->append_conf('postgresql.conf',
 	  . "log_parameter_max_length = 7\n"
 	  . "log_parameter_max_length_on_error = -1");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -371,7 +323,7 @@ SELECT 1 / (random() / 2)::int, :one::int, :two::int;
 
 $node->append_conf('postgresql.conf', "log_min_duration_statement = 0");
 $node->reload;
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -394,7 +346,7 @@ like(
 $log = undef;
 
 # Check that bad parameters are reported during typinput phase of BIND
-pgbench(
+$node->pgbench(
 	'-n -t1 -c1 -M prepared',
 	2,
 	[],
@@ -418,7 +370,7 @@ $node->reload;
 
 # test expressions
 # command 1..3 and 23 depend on random seed which is used to call srandom.
-pgbench(
+$node->pgbench(
 	'--random-seed=5432 -t 1 -Dfoo=-10.1 -Dbla=false -Di=+3 -Dn=null -Dt=t -Df=of -Dd=1.0',
 	0,
 	[ qr{type: .*/001_pgbench_expressions}, qr{processed: 1/1} ],
@@ -653,7 +605,7 @@ $node->safe_psql('postgres',
 my $seed = int(rand(1000000000));
 for my $i (1, 2)
 {
-	pgbench(
+	$node->pgbench(
 		"--random-seed=$seed -t 1",
 		0,
 		[qr{processed: 1/1}],
@@ -693,7 +645,7 @@ ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/,
 $node->safe_psql('postgres', 'DROP TABLE seeded_random;');
 
 # backslash commands
-pgbench(
+$node->pgbench(
 	'-t 1', 0,
 	[
 		qr{type: .*/001_pgbench_backslash_commands},
@@ -722,7 +674,7 @@ pgbench(
 	});
 
 # working \gset
-pgbench(
+$node->pgbench(
 	'-t 1', 0,
 	[ qr{type: .*/001_pgbench_gset}, qr{processed: 1/1} ],
 	[
@@ -757,7 +709,7 @@ SELECT 0 AS i4, 4 AS i4 \gset
 }
 	});
 
 # \gset cannot accept more than one row, causing command to fail.
-pgbench(
+$node->pgbench(
 	'-t 1', 2,
 	[ qr{type: .*/001_pgbench_gset_two_rows}, qr{processed: 0/1} ],
 	[qr{expected one row, got 2\b}],
@@ -770,7 +722,7 @@ SELECT 5432 AS fail UNION SELECT 5433 ORDER BY 1 \gset
 
 # working \aset
 # Valid cases.
-pgbench(
+$node->pgbench(
 	'-t 1', 0,
 	[ qr{type: .*/001_pgbench_aset}, qr{processed: 1/1} ],
 	[ qr{command=3.: int 8\b}, qr{command=4.: int 7\b} ],
@@ -786,7 +738,7 @@ SELECT 8 AS i6 UNION SELECT 9 ORDER BY 1 DESC \aset
 }
 	});
 
 # Empty result set with \aset, causing command to fail.
-pgbench(
+$node->pgbench(
 	'-t 1', 2,
 	[ qr{type: .*/001_pgbench_aset_empty}, qr{processed: 0/1} ],
 	[
@@ -803,7 +755,7 @@ pgbench(
 	});
 
 # Working \startpipeline
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	0,
 	[ qr{type: .*/001_pgbench_pipeline}, qr{actually processed: 1/1} ],
@@ -819,7 +771,7 @@ pgbench(
 	});
 
 # Working \startpipeline in prepared query mode
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M prepared',
 	0,
 	[ qr{type: .*/001_pgbench_pipeline_prep}, qr{actually processed: 1/1} ],
@@ -835,7 +787,7 @@ pgbench(
 	});
 
 # Try \startpipeline twice
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	2,
 	[],
@@ -850,7 +802,7 @@ pgbench(
 	});
 
 # Try to end a pipeline that hasn't started
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	2,
 	[],
@@ -864,7 +816,7 @@ pgbench(
 	});
 
 # Try \gset in pipeline mode
-pgbench(
+$node->pgbench(
 	'-t 1 -n -M extended',
 	2,
 	[],
@@ -1129,7 +1081,7 @@ for my $e (@errors)
 	$status != 0 or die "invalid expected status for test \"$name\"";
 	my $n = '001_pgbench_error_' . $name;
 	$n =~ s/ /_/g;
-	pgbench(
+	$node->pgbench(
 		'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX'
 		  . ' -Dmaxint=9223372036854775807 -Dminint=-9223372036854775808'
 		  . ($no_prepare ? '' : ' -M prepared'),
@@ -1141,14 +1093,14 @@ for my $e (@errors)
 }
 
 # throttling
-pgbench(
+$node->pgbench(
 	'-t 100 -S --rate=100000 --latency-limit=1000000 -c 2 -n -r',
 	0,
 	[ qr{processed: 200/200}, qr{builtin: select only} ],
 	[qr{^$}],
 	'pgbench throttling');
 
-pgbench(
+$node->pgbench(
 
 	# given the expected rate and the 2 ms tx duration, at most one is executed
 	'-t 10 --rate=100000 --latency-limit=1 -n -r',
@@ -1220,7 +1172,7 @@ sub check_pgbench_logs
 my $bdir = $node->basedir;
 
 # Run with sampling rate, 2 clients with 50 transactions each.
-pgbench(
+$node->pgbench(
 	"-n -S -t 50 -c 2 --log --sampling-rate=0.5",
 	0,
 	[ qr{select only}, qr{processed: 100/100} ],
 	[qr{^$}],
 	'pgbench logs', undef,
@@ -1230,7 +1182,7 @@ check_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,
 	qr{^[01] \d{1,2} \d+ \d \d+ \d+$});
 
 # Run with different read-only option pattern, 1 client with 10 transactions.
-pgbench(
+$node->pgbench(
 	"-n -b select-only -t 10 -l",
 	0,
 	[ qr{select only}, qr{processed: 10/10} ],
 	[qr{^$}],
 	'pgbench logs contents', undef,
diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h
index 770672890b..877e66c63c 100644
--- a/src/include/utils/inval.h
+++ b/src/include/utils/inval.h
@@ -62,6 +62,7 @@ extern void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
 extern void CallSyscacheCallbacks(int cacheid, uint32 hashvalue);
 
 extern void InvalidateSystemCaches(void);
+extern void InvalidateSystemCachesExtended(bool debug_discard);
 
 extern void LogLogicalInvalidations(void);
 
 #endif							/* INVAL_H */
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index d2c17575f6..aa060ef115 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -122,7 +122,7 @@ extern void RelationForgetRelation(Oid rid);
 
 extern void RelationCacheInvalidateEntry(Oid relationId);
 
-extern void RelationCacheInvalidate(void);
+extern void RelationCacheInvalidate(bool debug_discard);
 
 extern void RelationCloseSmgrByOid(Oid relationId);
 
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
index ba80baf091..465fdb6870 100644
--- a/src/test/perl/PostgresNode.pm
+++ b/src/test/perl/PostgresNode.pm
@@ -1982,6 +1982,140 @@ sub interactive_psql
 	return $harness;
 }
 
+# Common sub of pgbench-invoking interfaces.  Makes any requested script files
+# and returns pgbench command-line options causing use of those files.
+sub _pgbench_make_files
+{
+	my ($self, $files) = @_;
+	my @file_opts;
+
+	if (defined $files)
+	{
+
+		# note: files are ordered for determinism
+		for my $fn (sort keys %$files)
+		{
+			my $filename = $self->basedir . '/' . $fn;
+			push @file_opts, '-f', $filename;
+
+			# cleanup file weight
+			$filename =~ s/\@\d+$//;
+
+			#push @filenames, $filename;
+			# filenames are expected to be unique on a test
+			if (-e $filename)
+			{
+				ok(0, "$filename must not already exist");
+				unlink $filename or die "cannot unlink $filename: $!";
+			}
+			TestLib::append_to_file($filename, $$files{$fn});
+		}
+	}
+
+	return @file_opts;
+}
+
+=pod
+
+=item $node->pgbench($opts, $stat, $out, $err, $name, $files, @args)
+
+Invoke B<pgbench>, with parameters and files.
+
+=over
+
+=item $opts
+
+Options as a string to be split on spaces.
+
+=item $stat
+
+Expected exit status.
+
+=item $out
+
+Reference to a regexp list that must match stdout.
+
+=item $err
+
+Reference to a regexp list that must match stderr.
+
+=item $name
+
+Name of test for error messages.
+
+=item $files
+
+Reference to filename/contents dictionary.
+
+=item @args
+
+Further raw options or arguments.
+
+=back
+
+=cut
+
+sub pgbench
+{
+	local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+	my ($self, $opts, $stat, $out, $err, $name, $files, @args) = @_;
+	my @cmd = (
+		'pgbench',
+		split(/\s+/, $opts),
+		$self->_pgbench_make_files($files), @args);
+
+	$self->command_checks_all(\@cmd, $stat, $out, $err, $name);
+}
+
+=pod
+
+=item $node->background_pgbench($opts, $files, \$stdout, $timer) => harness
+
+Invoke B<pgbench> and return an IPC::Run harness object.  The process's stdin
+is empty, and its stdout and stderr go to the $stdout scalar reference.  This
+allows the caller to act on other parts of the system while B<pgbench> is
+running.  Errors from B<pgbench> are the caller's problem.
+
+The specified timer object is attached to the harness, as well.  It's the
+caller's responsibility to select the timeout length, and to restart the
+timer after each command if the timeout is per-command.
+
+Be sure to "finish" the harness when done with it.
+
+=over
+
+=item $opts
+
+Options as a string to be split on spaces.
+
+=item $files
+
+Reference to filename/contents dictionary.
+
+=back
+
+=cut
+
+sub background_pgbench
+{
+	my ($self, $opts, $files, $stdout, $timer) = @_;
+
+	my @cmd =
+	  ('pgbench', split(/\s+/, $opts), $self->_pgbench_make_files($files));
+
+	local %ENV = $self->_get_env();
+
+	my $stdin = "";
+	# IPC::Run would otherwise append to existing contents:
+	$$stdout = "" if ref($stdout);
+
+	my $harness = IPC::Run::start \@cmd, '<', \$stdin, '>', $stdout, '2>&1',
+	  $timer;
+
+	return $harness;
+}
+
 =pod
 
 =item $node->connect_ok($connstr, $test_name, %params)
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index cb5b5ec74c..1c7bac0578 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1093,6 +1093,7 @@ ImportForeignSchemaStmt
 ImportForeignSchemaType
 ImportForeignSchema_function
 ImportQual
+InProgressEnt
 IncludeWal
 InclusionOpaque
 IncrementVarSublevelsUp_context
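
Note for readers: the core of the relcache change above is a detect-and-retry protocol. RelationBuildDesc() pushes an entry onto in_progress_list, invalidation processing flips that entry's invalidated flag, and the build restarts until one pass completes without absorbing a relevant invalidation. The following self-contained sketch shows the same protocol in miniature; every name in it is illustrative only, and it deliberately simplifies the real code in src/backend/utils/cache/relcache.c (which grows its list with repalloc and resets it at transaction abort).

/*
 * Hypothetical sketch of the patch's detect-and-retry protocol.
 * Illustrative names only; not part of the patch.
 */
#include <stdbool.h>

typedef unsigned int Oid;

typedef struct
{
    Oid         reloid;         /* object being built */
    bool        invalidated;    /* an invalidation arrived mid-build */
} BuildStackEnt;

static BuildStackEnt build_stack[64];   /* fixed size; the patch grows its list */
static int  build_stack_len = 0;

/* analogous to RelationCacheInvalidateEntry() marking in-progress builds */
static void
mark_in_progress_builds(Oid relid)
{
    for (int i = 0; i < build_stack_len; i++)
        if (build_stack[i].reloid == relid)
            build_stack[i].invalidated = true;
}

/* analogous to RelationBuildDesc(): retry until no invalidation is absorbed */
static void
build_with_retry(Oid relid, void (*do_build) (Oid))
{
    int         slot = build_stack_len++;

    build_stack[slot].reloid = relid;
    do
    {
        build_stack[slot].invalidated = false;
        do_build(relid);        /* catalog reads may process invalidations,
                                 * which can call mark_in_progress_builds() */
    } while (build_stack[slot].invalidated);
    build_stack_len--;          /* pop; error paths must also reset this */
}

This is also why RelationCacheInvalidate() gains its debug_discard parameter: a debug_discard_caches rebuild would otherwise mark every in-progress entry on every pass and turn the loop above into an infinite one, exactly as the comment added in the patch warns.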
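On the testing side, 002_cic.pl above doubles as the reference usage of the new background_pgbench() interface. A stripped-down, hypothetical caller might look like this (node name, script name, and SQL are placeholders; for a Windows-portable way to read the exit status, see the MSWin32 branch in 002_cic.pl):

use PostgresNode;
use TestLib;
use Test::More tests => 1;

my $node = PostgresNode->new('bg_demo');
$node->init;
$node->start;

# Start pgbench in the background; its stdout/stderr accumulate in $out.
my $out   = '';
my $timer = IPC::Run::timeout(180);
my $h     = $node->background_pgbench(
	'--no-vacuum --client=1 --transactions=50',
	{ 'bg_demo_script' => q(SELECT 1;) },
	\$out, $timer);

# ... drive foreground activity against $node here while pgbench runs ...

$h->finish();    # always finish the harness when done with it
is($h->result(0), 0, 'background pgbench succeeded');
$node->stop;

The split between background_pgbench() and the foreground pgbench() method is deliberate: as the comment in 002_cic.pl notes, CREATE INDEX CONCURRENTLY occasionally deadlocks with itself, so the CIC script cannot simply be mixed into the same pgbench run as the INSERT scripts.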