Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical.  We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop).  We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up.  Going
forward, that should make for fewer random-seeming changes to existing
code.

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
This commit is contained in:
Tom Lane 2023-05-19 17:24:48 -04:00
parent df6b19fbbc
commit 0245f8db36
402 changed files with 4756 additions and 4427 deletions

View File

@ -81,7 +81,7 @@ sub relation_filepath
my ($relname) = @_; my ($relname) = @_;
my $pgdata = $node->data_dir; my $pgdata = $node->data_dir;
my $rel = $node->safe_psql('postgres', my $rel = $node->safe_psql('postgres',
qq(SELECT pg_relation_filepath('$relname'))); qq(SELECT pg_relation_filepath('$relname')));
die "path not found for relation $relname" unless defined $rel; die "path not found for relation $relname" unless defined $rel;
return "$pgdata/$rel"; return "$pgdata/$rel";
@ -267,7 +267,7 @@ sub check_all_options_uncorrupted
for my $endblock (qw(NULL 0)) for my $endblock (qw(NULL 0))
{ {
my $opts = my $opts =
"on_error_stop := $stop, " "on_error_stop := $stop, "
. "check_toast := $check_toast, " . "check_toast := $check_toast, "
. "skip := $skip, " . "skip := $skip, "
. "startblock := $startblock, " . "startblock := $startblock, "

View File

@ -38,30 +38,35 @@ $node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
my $main_h = $node->background_psql('postgres'); my $main_h = $node->background_psql('postgres');
$main_h->query_safe(q( $main_h->query_safe(
q(
BEGIN; BEGIN;
INSERT INTO tbl VALUES(0); INSERT INTO tbl VALUES(0);
)); ));
my $cic_h = $node->background_psql('postgres'); my $cic_h = $node->background_psql('postgres');
$cic_h->query_until(qr/start/, q( $cic_h->query_until(
qr/start/, q(
\echo start \echo start
CREATE INDEX CONCURRENTLY idx ON tbl(i); CREATE INDEX CONCURRENTLY idx ON tbl(i);
)); ));
$main_h->query_safe(q( $main_h->query_safe(
q(
PREPARE TRANSACTION 'a'; PREPARE TRANSACTION 'a';
)); ));
$main_h->query_safe(q( $main_h->query_safe(
q(
BEGIN; BEGIN;
INSERT INTO tbl VALUES(0); INSERT INTO tbl VALUES(0);
)); ));
$node->safe_psql('postgres', q(COMMIT PREPARED 'a';)); $node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
$main_h->query_safe(q( $main_h->query_safe(
q(
PREPARE TRANSACTION 'b'; PREPARE TRANSACTION 'b';
BEGIN; BEGIN;
INSERT INTO tbl VALUES(0); INSERT INTO tbl VALUES(0);
@ -69,7 +74,8 @@ INSERT INTO tbl VALUES(0);
$node->safe_psql('postgres', q(COMMIT PREPARED 'b';)); $node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
$main_h->query_safe(q( $main_h->query_safe(
q(
PREPARE TRANSACTION 'c'; PREPARE TRANSACTION 'c';
COMMIT PREPARED 'c'; COMMIT PREPARED 'c';
)); ));
@ -97,7 +103,8 @@ PREPARE TRANSACTION 'persists_forever';
$node->restart; $node->restart;
my $reindex_h = $node->background_psql('postgres'); my $reindex_h = $node->background_psql('postgres');
$reindex_h->query_until(qr/start/, q( $reindex_h->query_until(
qr/start/, q(
\echo start \echo start
DROP INDEX CONCURRENTLY idx; DROP INDEX CONCURRENTLY idx;
CREATE INDEX CONCURRENTLY idx ON tbl(i); CREATE INDEX CONCURRENTLY idx ON tbl(i);

View File

@ -407,7 +407,7 @@ verify_heapam(PG_FUNCTION_ARGS)
OffsetNumber successor[MaxOffsetNumber]; OffsetNumber successor[MaxOffsetNumber];
bool lp_valid[MaxOffsetNumber]; bool lp_valid[MaxOffsetNumber];
bool xmin_commit_status_ok[MaxOffsetNumber]; bool xmin_commit_status_ok[MaxOffsetNumber];
XidCommitStatus xmin_commit_status[MaxOffsetNumber]; XidCommitStatus xmin_commit_status[MaxOffsetNumber];
CHECK_FOR_INTERRUPTS(); CHECK_FOR_INTERRUPTS();
@ -444,7 +444,7 @@ verify_heapam(PG_FUNCTION_ARGS)
for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff; for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
ctx.offnum = OffsetNumberNext(ctx.offnum)) ctx.offnum = OffsetNumberNext(ctx.offnum))
{ {
BlockNumber nextblkno; BlockNumber nextblkno;
OffsetNumber nextoffnum; OffsetNumber nextoffnum;
successor[ctx.offnum] = InvalidOffsetNumber; successor[ctx.offnum] = InvalidOffsetNumber;
@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS)
/* /*
* Since we've checked that this redirect points to a line * Since we've checked that this redirect points to a line
* pointer between FirstOffsetNumber and maxoff, it should * pointer between FirstOffsetNumber and maxoff, it should now
* now be safe to fetch the referenced line pointer. We expect * be safe to fetch the referenced line pointer. We expect it
* it to be LP_NORMAL; if not, that's corruption. * to be LP_NORMAL; if not, that's corruption.
*/ */
rditem = PageGetItemId(ctx.page, rdoffnum); rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem)) if (!ItemIdIsUsed(rditem))
@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS)
{ {
/* /*
* We should not have set successor[ctx.offnum] to a value * We should not have set successor[ctx.offnum] to a value
* other than InvalidOffsetNumber unless that line pointer * other than InvalidOffsetNumber unless that line pointer is
* is LP_NORMAL. * LP_NORMAL.
*/ */
Assert(ItemIdIsNormal(next_lp)); Assert(ItemIdIsNormal(next_lp));
@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS)
} }
/* /*
* If the next line pointer is a redirect, or if it's a tuple * If the next line pointer is a redirect, or if it's a tuple but
* but the XMAX of this tuple doesn't match the XMIN of the next * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and * tuple, then the two aren't part of the same update chain and
* there is nothing more to do. * there is nothing more to do.
*/ */
@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS)
} }
/* /*
* This tuple and the tuple to which it points seem to be part * This tuple and the tuple to which it points seem to be part of
* of an update chain. * an update chain.
*/ */
predecessor[nextoffnum] = ctx.offnum; predecessor[nextoffnum] = ctx.offnum;
@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS)
} }
/* /*
* If the current tuple's xmin is aborted but the successor tuple's * If the current tuple's xmin is aborted but the successor
* xmin is in-progress or committed, that's corruption. * tuple's xmin is in-progress or committed, that's corruption.
*/ */
if (xmin_commit_status_ok[ctx.offnum] && if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED && xmin_commit_status[ctx.offnum] == XID_ABORTED &&
@ -1025,7 +1025,7 @@ check_tuple_visibility(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
HeapTupleHeader tuphdr = ctx->tuphdr; HeapTupleHeader tuphdr = ctx->tuphdr;
ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */ ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
*xmin_commit_status_ok = false; /* have not yet proven otherwise */ *xmin_commit_status_ok = false; /* have not yet proven otherwise */
/* If xmin is normal, it should be within valid range */ /* If xmin is normal, it should be within valid range */
xmin = HeapTupleHeaderGetXmin(tuphdr); xmin = HeapTupleHeaderGetXmin(tuphdr);
@ -1837,7 +1837,7 @@ check_tuple(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
* therefore cannot check it. * therefore cannot check it.
*/ */
if (!check_tuple_visibility(ctx, xmin_commit_status_ok, if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
xmin_commit_status)) xmin_commit_status))
return; return;
/* /*
@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
diff = (int32) (ctx->next_xid - xid); diff = (int32) (ctx->next_xid - xid);
/* /*
* In cases of corruption we might see a 32bit xid that is before epoch * In cases of corruption we might see a 32bit xid that is before epoch 0.
* 0. We can't represent that as a 64bit xid, due to 64bit xids being * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's * unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use * no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd * FirstNormalFullTransactionId in that case, as a freshly initdb'd

View File

@ -19,7 +19,7 @@ sub query_log
local $ENV{PGOPTIONS} = join " ", local $ENV{PGOPTIONS} = join " ",
map { "-c $_=$params->{$_}" } keys %$params; map { "-c $_=$params->{$_}" } keys %$params;
my $log = $node->logfile(); my $log = $node->logfile();
my $offset = -s $log; my $offset = -s $log;
$node->safe_psql("postgres", $sql); $node->safe_psql("postgres", $sql);
@ -113,7 +113,7 @@ $log_contents = query_log(
"SELECT * FROM pg_class;", "SELECT * FROM pg_class;",
{ {
"auto_explain.log_verbose" => "on", "auto_explain.log_verbose" => "on",
"compute_query_id" => "on" "compute_query_id" => "on"
}); });
like( like(
@ -127,7 +127,7 @@ $log_contents = query_log(
"SELECT * FROM pg_class;", "SELECT * FROM pg_class;",
{ {
"auto_explain.log_verbose" => "on", "auto_explain.log_verbose" => "on",
"compute_query_id" => "regress" "compute_query_id" => "regress"
}); });
unlike( unlike(

View File

@ -25,7 +25,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
# This is only needed on Windows machines that don't use UNIX sockets. # This is only needed on Windows machines that don't use UNIX sockets.
$node->init( $node->init(
'allows_streaming' => 1, 'allows_streaming' => 1,
'auth_extra' => [ '--create-role', 'backupuser' ]); 'auth_extra' => [ '--create-role', 'backupuser' ]);
$node->append_conf('postgresql.conf', $node->append_conf('postgresql.conf',
"shared_preload_libraries = 'basebackup_to_shell'"); "shared_preload_libraries = 'basebackup_to_shell'");
@ -50,7 +50,7 @@ $node->command_fails_like(
'fails if basebackup_to_shell.command is not set'); 'fails if basebackup_to_shell.command is not set');
# Configure basebackup_to_shell.command and reload the configuration file. # Configure basebackup_to_shell.command and reload the configuration file.
my $backup_path = PostgreSQL::Test::Utils::tempdir; my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path; my $escaped_backup_path = $backup_path;
$escaped_backup_path =~ s{\\}{\\\\}g $escaped_backup_path =~ s{\\}{\\\\}g
if ($PostgreSQL::Test::Utils::windows_os); if ($PostgreSQL::Test::Utils::windows_os);

View File

@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state)
MemoryContext basic_archive_context; MemoryContext basic_archive_context;
/* /*
* If we didn't get to storing the pointer to our allocated state, we don't * If we didn't get to storing the pointer to our allocated state, we
* have anything to clean up. * don't have anything to clean up.
*/ */
if (data == NULL) if (data == NULL)
return; return;

View File

@ -1287,7 +1287,7 @@ dblink_get_connections(PG_FUNCTION_ARGS)
if (astate) if (astate)
PG_RETURN_DATUM(makeArrayResult(astate, PG_RETURN_DATUM(makeArrayResult(astate,
CurrentMemoryContext)); CurrentMemoryContext));
else else
PG_RETURN_NULL(); PG_RETURN_NULL();
} }

View File

@ -83,7 +83,7 @@ else
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid'; $outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
} }
my $sql = my $sql =
"select $outf from " "select $outf from "
. join(', ', keys %table) . join(', ', keys %table)
. " where " . " where "
. join(' AND ', @where) . ';'; . join(' AND ', @where) . ';';
@ -100,9 +100,9 @@ if ($opt{e})
print @plan; print @plan;
} }
my $t0 = [gettimeofday]; my $t0 = [gettimeofday];
my $count = 0; my $count = 0;
my $b = $opt{b}; my $b = $opt{b};
$b ||= 1; $b ||= 1;
my @a; my @a;
foreach (1 .. $b) foreach (1 .. $b)

View File

@ -19,7 +19,7 @@ create table message_section_map (
EOT EOT
open(my $msg, '>', "message.tmp") || die; open(my $msg, '>', "message.tmp") || die;
open(my $map, '>', "message_section_map.tmp") || die; open(my $map, '>', "message_section_map.tmp") || die;
srand(1); srand(1);

View File

@ -43,7 +43,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
ltree *left, ltree *right) ltree *left, ltree *right)
{ {
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) + int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
(left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0); (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size); ltree_gist *result = palloc(size);
SET_VARSIZE(result, size); SET_VARSIZE(result, size);

View File

@ -175,7 +175,7 @@ Datum
ltree_in(PG_FUNCTION_ARGS) ltree_in(PG_FUNCTION_ARGS)
{ {
char *buf = (char *) PG_GETARG_POINTER(0); char *buf = (char *) PG_GETARG_POINTER(0);
ltree *res; ltree *res;
if ((res = parse_ltree(buf, fcinfo->context)) == NULL) if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL(); PG_RETURN_NULL();
@ -584,7 +584,7 @@ parse_lquery(const char *buf, struct Node *escontext)
*/ */
static bool static bool
finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos, finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
struct Node *escontext) struct Node *escontext)
{ {
if (is_lquery) if (is_lquery)
{ {
@ -745,7 +745,7 @@ Datum
lquery_in(PG_FUNCTION_ARGS) lquery_in(PG_FUNCTION_ARGS)
{ {
char *buf = (char *) PG_GETARG_POINTER(0); char *buf = (char *) PG_GETARG_POINTER(0);
lquery *res; lquery *res;
if ((res = parse_lquery(buf, fcinfo->context)) == NULL) if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL(); PG_RETURN_NULL();

View File

@ -186,8 +186,8 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long"))); errmsg("word is too long")));
if (! pushquery(state, type, ltree_crc32_sz(strval, lenval), if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
state->curop - state->op, lenval, flag)) state->curop - state->op, lenval, flag))
return false; return false;
while (state->curop - state->op + lenval + 1 >= state->lenop) while (state->curop - state->op + lenval + 1 >= state->lenop)
@ -408,7 +408,7 @@ PG_FUNCTION_INFO_V1(ltxtq_in);
Datum Datum
ltxtq_in(PG_FUNCTION_ARGS) ltxtq_in(PG_FUNCTION_ARGS)
{ {
ltxtquery *res; ltxtquery *res;
if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL) if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
PG_RETURN_NULL(); PG_RETURN_NULL();

View File

@ -21,7 +21,7 @@ $node->start;
# setup # setup
$node->safe_psql("postgres", $node->safe_psql("postgres",
"CREATE EXTENSION pg_prewarm;\n" "CREATE EXTENSION pg_prewarm;\n"
. "CREATE TABLE test(c1 int);\n" . "CREATE TABLE test(c1 int);\n"
. "INSERT INTO test SELECT generate_series(1, 100);"); . "INSERT INTO test SELECT generate_series(1, 100);");

View File

@ -252,8 +252,8 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record,
int block_id; int block_id;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
RmgrData desc; RmgrData desc;
const char *record_type; const char *record_type;
StringInfoData rec_desc; StringInfoData rec_desc;
Assert(XLogRecHasAnyBlockRefs(record)); Assert(XLogRecHasAnyBlockRefs(record));

View File

@ -61,7 +61,7 @@ typedef struct ConnCacheEntry
bool have_error; /* have any subxacts aborted in this xact? */ bool have_error; /* have any subxacts aborted in this xact? */
bool changing_xact_state; /* xact state change in process */ bool changing_xact_state; /* xact state change in process */
bool parallel_commit; /* do we commit (sub)xacts in parallel? */ bool parallel_commit; /* do we commit (sub)xacts in parallel? */
bool parallel_abort; /* do we abort (sub)xacts in parallel? */ bool parallel_abort; /* do we abort (sub)xacts in parallel? */
bool invalidated; /* true if reconnect is pending */ bool invalidated; /* true if reconnect is pending */
bool keep_connections; /* setting value of keep_connections bool keep_connections; /* setting value of keep_connections
* server option */ * server option */

View File

@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
/* /*
* Should never get called when the insert is being performed on a table * Should never get called when the insert is being performed on a table
* that is also among the target relations of an UPDATE operation, * that is also among the target relations of an UPDATE operation, because
* because postgresBeginForeignInsert() currently rejects such insert * postgresBeginForeignInsert() currently rejects such insert attempts.
* attempts.
*/ */
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL); Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
@ -5167,15 +5166,15 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
*/ */
if (method != ANALYZE_SAMPLE_OFF) if (method != ANALYZE_SAMPLE_OFF)
{ {
bool can_tablesample; bool can_tablesample;
reltuples = postgresGetAnalyzeInfoForForeignTable(relation, reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
&can_tablesample); &can_tablesample);
/* /*
* Make sure we're not choosing TABLESAMPLE when the remote relation does * Make sure we're not choosing TABLESAMPLE when the remote relation
* not support that. But only do this for "auto" - if the user explicitly * does not support that. But only do this for "auto" - if the user
* requested BERNOULLI/SYSTEM, it's better to fail. * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/ */
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO)) if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM; method = ANALYZE_SAMPLE_RANDOM;
@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
else else
{ {
/* /*
* All supported sampling methods require sampling rate, * All supported sampling methods require sampling rate, not
* not target rows directly, so we calculate that using * target rows directly, so we calculate that using the remote
* the remote reltuples value. That's imperfect, because * reltuples value. That's imperfect, because it might be off a
* it might be off a good deal, but that's not something * good deal, but that's not something we can (or should) address
* we can (or should) address here. * here.
* *
* If reltuples is too low (i.e. when table grew), we'll * If reltuples is too low (i.e. when table grew), we'll end up
* end up sampling more rows - but then we'll apply the * sampling more rows - but then we'll apply the local sampling,
* local sampling, so we get the expected sample size. * so we get the expected sample size. This is the same outcome as
* This is the same outcome as without remote sampling. * without remote sampling.
* *
* If reltuples is too high (e.g. after bulk DELETE), we * If reltuples is too high (e.g. after bulk DELETE), we will end
* will end up sampling too few rows. * up sampling too few rows.
* *
* We can't really do much better here - we could try * We can't really do much better here - we could try sampling a
* sampling a bit more rows, but we don't know how off * bit more rows, but we don't know how off the reltuples value is
* the reltuples value is so how much is "a bit more"? * so how much is "a bit more"?
* *
* Furthermore, the targrows value for partitions is * Furthermore, the targrows value for partitions is determined
* determined based on table size (relpages), which can * based on table size (relpages), which can be off in different
* be off in different ways too. Adjusting the sampling * ways too. Adjusting the sampling rate here might make the issue
* rate here might make the issue worse. * worse.
*/ */
sample_frac = targrows / reltuples; sample_frac = targrows / reltuples;
/* /*
* We should never get sampling rate outside the valid range * We should never get sampling rate outside the valid range
* (between 0.0 and 1.0), because those cases should be covered * (between 0.0 and 1.0), because those cases should be covered by
* by the previous branch that sets ANALYZE_SAMPLE_OFF. * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/ */
Assert(sample_frac >= 0.0 && sample_frac <= 1.0); Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
} }

View File

@ -183,7 +183,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
/* See if we already cached the result. */ /* See if we already cached the result. */
entry = (ShippableCacheEntry *) entry = (ShippableCacheEntry *)
hash_search(ShippableCacheHash, &key, HASH_FIND, NULL); hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
if (!entry) if (!entry)
{ {
@ -196,7 +196,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
* cache invalidation. * cache invalidation.
*/ */
entry = (ShippableCacheEntry *) entry = (ShippableCacheEntry *)
hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL); hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
entry->shippable = shippable; entry->shippable = shippable;
} }

View File

@ -6,14 +6,14 @@ use strict;
use warnings; use warnings;
my $integer = '[+-]?[0-9]+'; my $integer = '[+-]?[0-9]+';
my $real = '[+-]?[0-9]+\.[0-9]+'; my $real = '[+-]?[0-9]+\.[0-9]+';
my $RANGE = '(\.\.)(\.)?'; my $RANGE = '(\.\.)(\.)?';
my $PLUMIN = q(\'\+\-\'); my $PLUMIN = q(\'\+\-\');
my $FLOAT = "(($integer)|($real))([eE]($integer))?"; my $FLOAT = "(($integer)|($real))([eE]($integer))?";
my $EXTENSION = '<|>|~'; my $EXTENSION = '<|>|~';
my $boundary = "($EXTENSION)?$FLOAT"; my $boundary = "($EXTENSION)?$FLOAT";
my $deviation = $FLOAT; my $deviation = $FLOAT;
my $rule_1 = $boundary . $PLUMIN . $deviation; my $rule_1 = $boundary . $PLUMIN . $deviation;

View File

@ -92,7 +92,7 @@ regression_slot3|t|t),
# replication statistics data is fine after restart. # replication statistics data is fine after restart.
$node->stop; $node->stop;
my $datadir = $node->data_dir; my $datadir = $node->data_dir;
my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3"; my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3";
rmtree($slot3_replslotdir); rmtree($slot3_replslotdir);

View File

@ -288,7 +288,7 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{ {
TestDecodingData *data = ctx->output_plugin_private; TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata = TestDecodingTxnData *txndata =
MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false; txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata; txn->output_plugin_private = txndata;
@ -348,7 +348,7 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{ {
TestDecodingData *data = ctx->output_plugin_private; TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata = TestDecodingTxnData *txndata =
MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false; txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata; txn->output_plugin_private = txndata;

View File

@ -34,7 +34,7 @@ print "<tbody>\n";
while (<$feat>) while (<$feat>)
{ {
chomp; chomp;
my ($feature_id, $feature_name, $subfeature_id, my ($feature_id, $feature_name, $subfeature_id,
$subfeature_name, $is_supported, $comments) = split /\t/; $subfeature_name, $is_supported, $comments) = split /\t/;
$is_supported eq $yesno || next; $is_supported eq $yesno || next;

View File

@ -700,8 +700,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
} }
/* /*
* If we found a scan key eliminating the range, no need to * If we found a scan key eliminating the range, no need
* check additional ones. * to check additional ones.
*/ */
if (!addrange) if (!addrange)
break; break;
@ -1223,7 +1223,7 @@ brin_build_desc(Relation rel)
* Obtain BrinOpcInfo for each indexed column. While at it, accumulate * Obtain BrinOpcInfo for each indexed column. While at it, accumulate
* the number of columns stored, since the number is opclass-defined. * the number of columns stored, since the number is opclass-defined.
*/ */
opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts); opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
for (keyno = 0; keyno < tupdesc->natts; keyno++) for (keyno = 0; keyno < tupdesc->natts; keyno++)
{ {
FmgrInfo *opcInfoFn; FmgrInfo *opcInfoFn;
@ -1801,8 +1801,8 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
bval = &dtup->bt_columns[keyno]; bval = &dtup->bt_columns[keyno];
/* /*
* Does the range have actual NULL values? Either of the flags can * Does the range have actual NULL values? Either of the flags can be
* be set, but we ignore the state before adding first row. * set, but we ignore the state before adding first row.
* *
* We have to remember this, because we'll modify the flags and we * We have to remember this, because we'll modify the flags and we
* need to know if the range started as empty. * need to know if the range started as empty.
@ -1842,12 +1842,12 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/* /*
* If the range had actual NULL values (i.e. did not start empty), * make sure we don't forget about the NULL values. Either the
* make sure we don't forget about the NULL values. Either the allnulls * make sure we don't forget about the NULL values. Either the
* flag is still set to true, or (if the opclass cleared it) we need to * allnulls flag is still set to true, or (if the opclass cleared it)
* set hasnulls=true. * we need to set hasnulls=true.
* *
* XXX This can only happen when the opclass modified the tuple, so the * XXX This can only happen when the opclass modified the tuple, so
* modified flag should be set. * the modified flag should be set.
*/ */
if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls)) if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
{ {
@ -1859,9 +1859,9 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/* /*
* After updating summaries for all the keys, mark it as not empty. * After updating summaries for all the keys, mark it as not empty.
* *
* If we're actually changing the flag value (i.e. tuple started as empty), * If we're actually changing the flag value (i.e. tuple started as
* we should have modified the tuple. So we should not see empty range that * empty), we should have modified the tuple. So we should not see empty
* was not modified. * range that was not modified.
*/ */
Assert(!dtup->bt_empty_range || modified); Assert(!dtup->bt_empty_range || modified);
dtup->bt_empty_range = false; dtup->bt_empty_range = false;

View File

@ -1717,7 +1717,7 @@ allocateReloptStruct(Size base, relopt_value *options, int numoptions)
if (optstr->fill_cb) if (optstr->fill_cb)
{ {
const char *val = optval->isset ? optval->values.string_val : const char *val = optval->isset ? optval->values.string_val :
optstr->default_isnull ? NULL : optstr->default_val; optstr->default_isnull ? NULL : optstr->default_val;
size += optstr->fill_cb(val, NULL); size += optstr->fill_cb(val, NULL);
} }
@ -1796,8 +1796,8 @@ fillRelOptions(void *rdopts, Size basesize,
if (optstring->fill_cb) if (optstring->fill_cb)
{ {
Size size = Size size =
optstring->fill_cb(string_val, optstring->fill_cb(string_val,
(char *) rdopts + offset); (char *) rdopts + offset);
if (size) if (size)
{ {

View File

@ -1117,7 +1117,7 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset)) for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
{ {
IndexTuple ituple = (IndexTuple) IndexTuple ituple = (IndexTuple)
PageGetItem(page, PageGetItemId(page, offset)); PageGetItem(page, PageGetItemId(page, offset));
if (downlink == NULL) if (downlink == NULL)
downlink = CopyIndexTuple(ituple); downlink = CopyIndexTuple(ituple);

View File

@ -598,7 +598,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
{ {
GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc); GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc);
GISTNodeBuffer *newNodeBuffer; GISTNodeBuffer *newNodeBuffer;
int i = foreach_current_index(lc); int i = foreach_current_index(lc);
/* Decompress parent index tuple of node buffer page. */ /* Decompress parent index tuple of node buffer page. */
gistDeCompressAtt(giststate, r, gistDeCompressAtt(giststate, r,

View File

@ -657,7 +657,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL) if (so->killedItems == NULL)
{ {
MemoryContext oldCxt = MemoryContext oldCxt =
MemoryContextSwitchTo(so->giststate->scanCxt); MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems = so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage (OffsetNumber *) palloc(MaxIndexTuplesPerPage
@ -694,7 +694,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL) if (so->killedItems == NULL)
{ {
MemoryContext oldCxt = MemoryContext oldCxt =
MemoryContextSwitchTo(so->giststate->scanCxt); MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems = so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage (OffsetNumber *) palloc(MaxIndexTuplesPerPage

View File

@ -125,7 +125,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record)
if (data - begin < datalen) if (data - begin < datalen)
{ {
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber : OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
OffsetNumberNext(PageGetMaxOffsetNumber(page)); OffsetNumberNext(PageGetMaxOffsetNumber(page));
while (data - begin < datalen) while (data - begin < datalen)
{ {

View File

@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS)
} }
else else
{ {
Size bsize, rsize; Size bsize,
rsize;
char *buf; char *buf;
const char *keydata = VARDATA_ANY(key); const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key); size_t keylen = VARSIZE_ANY_EXHDR(key);
@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS)
/* /*
* In principle, there's no reason to include the terminating NUL * In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior * character in the hash, but it was done before and the behavior must
* must be preserved. * be preserved.
*/ */
result = hash_any((uint8_t *) buf, bsize + 1); result = hash_any((uint8_t *) buf, bsize + 1);
@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS)
} }
else else
{ {
Size bsize, rsize; Size bsize,
rsize;
char *buf; char *buf;
const char *keydata = VARDATA_ANY(key); const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key); size_t keylen = VARSIZE_ANY_EXHDR(key);
@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS)
/* /*
* In principle, there's no reason to include the terminating NUL * In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior * character in the hash, but it was done before and the behavior must
* must be preserved. * be preserved.
*/ */
result = hash_any_extended((uint8_t *) buf, bsize + 1, result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1)); PG_GETARG_INT64(1));

View File

@ -2491,7 +2491,7 @@ static inline bool
xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
{ {
const uint16 interesting = const uint16 interesting =
HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
if ((new_infomask & interesting) != (old_infomask & interesting)) if ((new_infomask & interesting) != (old_infomask & interesting))
return true; return true;

View File

@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
* Note: heap_update returns the tid (location) of the new tuple in the * Note: heap_update returns the tid (location) of the new tuple in the
* t_self field. * t_self field.
* *
* If the update is not HOT, we must update all indexes. If the update * If the update is not HOT, we must update all indexes. If the update is
* is HOT, it could be that we updated summarized columns, so we either * HOT, it could be that we updated summarized columns, so we either
* update only summarized indexes, or none at all. * update only summarized indexes, or none at all.
*/ */
if (result != TM_Ok) if (result != TM_Ok)

View File

@ -376,7 +376,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate,
if (use_fsm && i >= not_in_fsm_pages) if (use_fsm && i >= not_in_fsm_pages)
{ {
Size freespace = BufferGetPageSize(victim_buffers[i]) - Size freespace = BufferGetPageSize(victim_buffers[i]) -
SizeOfPageHeaderData; SizeOfPageHeaderData;
RecordPageWithFreeSpace(relation, curBlock, freespace); RecordPageWithFreeSpace(relation, curBlock, freespace);
} }

View File

@ -532,7 +532,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
if (!TransactionIdIsValid(prstate->old_snap_xmin)) if (!TransactionIdIsValid(prstate->old_snap_xmin))
{ {
TransactionId horizon = TransactionId horizon =
GlobalVisTestNonRemovableHorizon(prstate->vistest); GlobalVisTestNonRemovableHorizon(prstate->vistest);
TransactionIdLimitedForOldSnapshots(horizon, prstate->rel, TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
&prstate->old_snap_xmin, &prstate->old_snap_xmin,

View File

@ -389,6 +389,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED); Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
Assert(params->truncate != VACOPTVALUE_UNSPECIFIED && Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
params->truncate != VACOPTVALUE_AUTO); params->truncate != VACOPTVALUE_AUTO);
/* /*
* While VacuumFailSafeActive is reset to false before calling this, we * While VacuumFailSafeActive is reset to false before calling this, we
* still need to reset it here due to recursive calls. * still need to reset it here due to recursive calls.
@ -1813,12 +1814,12 @@ retry:
{ {
/* /*
* We have no freeze plans to execute, so there's no added cost * We have no freeze plans to execute, so there's no added cost
* from following the freeze path. That's why it was chosen. * from following the freeze path. That's why it was chosen. This
* This is important in the case where the page only contains * is important in the case where the page only contains totally
* totally frozen tuples at this point (perhaps only following * frozen tuples at this point (perhaps only following pruning).
* pruning). Such pages can be marked all-frozen in the VM by our * Such pages can be marked all-frozen in the VM by our caller,
* caller, even though none of its tuples were newly frozen here * even though none of its tuples were newly frozen here (note
* (note that the "no freeze" path never sets pages all-frozen). * that the "no freeze" path never sets pages all-frozen).
* *
* We never increment the frozen_pages instrumentation counter * We never increment the frozen_pages instrumentation counter
* here, since it only counts pages with newly frozen tuples * here, since it only counts pages with newly frozen tuples
@ -3117,8 +3118,8 @@ dead_items_max_items(LVRelState *vacrel)
{ {
int64 max_items; int64 max_items;
int vac_work_mem = IsAutoVacuumWorkerProcess() && int vac_work_mem = IsAutoVacuumWorkerProcess() &&
autovacuum_work_mem != -1 ? autovacuum_work_mem != -1 ?
autovacuum_work_mem : maintenance_work_mem; autovacuum_work_mem : maintenance_work_mem;
if (vacrel->nindexes > 0) if (vacrel->nindexes > 0)
{ {

View File

@ -626,7 +626,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
static Buffer static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks) vm_extend(Relation rel, BlockNumber vm_nblocks)
{ {
Buffer buf; Buffer buf;
buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL, buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
EB_CREATE_FORK_IF_NEEDED | EB_CREATE_FORK_IF_NEEDED |

View File

@ -2947,7 +2947,7 @@ void
_bt_pendingfsm_finalize(Relation rel, BTVacState *vstate) _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
{ {
IndexBulkDeleteResult *stats = vstate->stats; IndexBulkDeleteResult *stats = vstate->stats;
Relation heaprel = vstate->info->heaprel; Relation heaprel = vstate->info->heaprel;
Assert(stats->pages_newly_deleted >= vstate->npendingpages); Assert(stats->pages_newly_deleted >= vstate->npendingpages);
@ -3027,7 +3027,7 @@ _bt_pendingfsm_add(BTVacState *vstate,
if (vstate->npendingpages > 0) if (vstate->npendingpages > 0)
{ {
FullTransactionId lastsafexid = FullTransactionId lastsafexid =
vstate->pendingpages[vstate->npendingpages - 1].safexid; vstate->pendingpages[vstate->npendingpages - 1].safexid;
Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid)); Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
} }

View File

@ -27,7 +27,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
if (info == XLOG_DBASE_CREATE_FILE_COPY) if (info == XLOG_DBASE_CREATE_FILE_COPY)
{ {
xl_dbase_create_file_copy_rec *xlrec = xl_dbase_create_file_copy_rec *xlrec =
(xl_dbase_create_file_copy_rec *) rec; (xl_dbase_create_file_copy_rec *) rec;
appendStringInfo(buf, "copy dir %u/%u to %u/%u", appendStringInfo(buf, "copy dir %u/%u to %u/%u",
xlrec->src_tablespace_id, xlrec->src_db_id, xlrec->src_tablespace_id, xlrec->src_db_id,
@ -36,7 +36,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
else if (info == XLOG_DBASE_CREATE_WAL_LOG) else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{ {
xl_dbase_create_wal_log_rec *xlrec = xl_dbase_create_wal_log_rec *xlrec =
(xl_dbase_create_wal_log_rec *) rec; (xl_dbase_create_wal_log_rec *) rec;
appendStringInfo(buf, "create dir %u/%u", appendStringInfo(buf, "create dir %u/%u",
xlrec->tablespace_id, xlrec->db_id); xlrec->tablespace_id, xlrec->db_id);

View File

@ -120,7 +120,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
else else
{ {
ginxlogInsertDataInternal *insertData = ginxlogInsertDataInternal *insertData =
(ginxlogInsertDataInternal *) payload; (ginxlogInsertDataInternal *) payload;
appendStringInfo(buf, " pitem: %u-%u/%u", appendStringInfo(buf, " pitem: %u-%u/%u",
PostingItemGetBlockNumber(&insertData->newitem), PostingItemGetBlockNumber(&insertData->newitem),
@ -156,7 +156,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
else else
{ {
ginxlogVacuumDataLeafPage *xlrec = ginxlogVacuumDataLeafPage *xlrec =
(ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
desc_recompress_leaf(buf, &xlrec->data); desc_recompress_leaf(buf, &xlrec->data);
} }

View File

@ -115,7 +115,7 @@ spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances)
{ {
/* allocate distance array only for non-NULL items */ /* allocate distance array only for non-NULL items */
SpGistSearchItem *item = SpGistSearchItem *item =
palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
item->isNull = isnull; item->isNull = isnull;
@ -130,7 +130,7 @@ static void
spgAddStartItem(SpGistScanOpaque so, bool isnull) spgAddStartItem(SpGistScanOpaque so, bool isnull)
{ {
SpGistSearchItem *startEntry = SpGistSearchItem *startEntry =
spgAllocSearchItem(so, isnull, so->zeroDistances); spgAllocSearchItem(so, isnull, so->zeroDistances);
ItemPointerSet(&startEntry->heapPtr, ItemPointerSet(&startEntry->heapPtr,
isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO, isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO,
@ -768,7 +768,7 @@ spgTestLeafTuple(SpGistScanOpaque so,
storeRes_func storeRes) storeRes_func storeRes)
{ {
SpGistLeafTuple leafTuple = (SpGistLeafTuple) SpGistLeafTuple leafTuple = (SpGistLeafTuple)
PageGetItem(page, PageGetItemId(page, offset)); PageGetItem(page, PageGetItemId(page, offset));
if (leafTuple->tupstate != SPGIST_LIVE) if (leafTuple->tupstate != SPGIST_LIVE)
{ {
@ -896,7 +896,7 @@ redirect:
else /* page is inner */ else /* page is inner */
{ {
SpGistInnerTuple innerTuple = (SpGistInnerTuple) SpGistInnerTuple innerTuple = (SpGistInnerTuple)
PageGetItem(page, PageGetItemId(page, offset)); PageGetItem(page, PageGetItemId(page, offset));
if (innerTuple->tupstate != SPGIST_LIVE) if (innerTuple->tupstate != SPGIST_LIVE)
{ {
@ -974,7 +974,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
else else
{ {
IndexOrderByDistance *distances = IndexOrderByDistance *distances =
palloc(sizeof(distances[0]) * so->numberOfOrderBys); palloc(sizeof(distances[0]) * so->numberOfOrderBys);
int i; int i;
for (i = 0; i < so->numberOfOrderBys; i++) for (i = 0; i < so->numberOfOrderBys; i++)

View File

@ -112,7 +112,7 @@ TableScanDesc
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key) table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{ {
uint32 flags = SO_TYPE_SEQSCAN | uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation); Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
@ -176,7 +176,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
{ {
Snapshot snapshot; Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN | uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == pscan->phs_relid); Assert(RelationGetRelid(relation) == pscan->phs_relid);

View File

@ -3270,7 +3270,7 @@ multixact_redo(XLogReaderState *record)
else if (info == XLOG_MULTIXACT_CREATE_ID) else if (info == XLOG_MULTIXACT_CREATE_ID)
{ {
xl_multixact_create *xlrec = xl_multixact_create *xlrec =
(xl_multixact_create *) XLogRecGetData(record); (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid; TransactionId max_xid;
int i; int i;

View File

@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace); shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/* /*
* Serialize the transaction snapshot if the transaction * Serialize the transaction snapshot if the transaction isolation
* isolation level uses a transaction snapshot. * level uses a transaction snapshot.
*/ */
if (IsolationUsesXactSnapshot()) if (IsolationUsesXactSnapshot())
{ {
@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg)
RestoreClientConnectionInfo(clientconninfospace); RestoreClientConnectionInfo(clientconninfospace);
/* /*
* Initialize SystemUser now that MyClientConnectionInfo is restored. * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
* Also ensure that auth_method is actually valid, aka authn_id is not NULL. * ensure that auth_method is actually valid, aka authn_id is not NULL.
*/ */
if (MyClientConnectionInfo.authn_id) if (MyClientConnectionInfo.authn_id)
InitializeSystemUser(MyClientConnectionInfo.authn_id, InitializeSystemUser(MyClientConnectionInfo.authn_id,

View File

@ -3152,10 +3152,9 @@ CommitTransactionCommand(void)
break; break;
/* /*
* The user issued a SAVEPOINT inside a transaction block. * The user issued a SAVEPOINT inside a transaction block. Start a
* Start a subtransaction. (DefineSavepoint already did * subtransaction. (DefineSavepoint already did PushTransaction,
* PushTransaction, so as to have someplace to put the SUBBEGIN * so as to have someplace to put the SUBBEGIN state.)
* state.)
*/ */
case TBLOCK_SUBBEGIN: case TBLOCK_SUBBEGIN:
StartSubTransaction(); StartSubTransaction();
@ -4696,9 +4695,9 @@ RollbackAndReleaseCurrentSubTransaction(void)
s = CurrentTransactionState; /* changed by pop */ s = CurrentTransactionState; /* changed by pop */
Assert(s->blockState == TBLOCK_SUBINPROGRESS || Assert(s->blockState == TBLOCK_SUBINPROGRESS ||
s->blockState == TBLOCK_INPROGRESS || s->blockState == TBLOCK_INPROGRESS ||
s->blockState == TBLOCK_IMPLICIT_INPROGRESS || s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
s->blockState == TBLOCK_STARTED); s->blockState == TBLOCK_STARTED);
} }
/* /*

View File

@ -5460,8 +5460,8 @@ StartupXLOG(void)
missingContrecPtr = endOfRecoveryInfo->missingContrecPtr; missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
/* /*
* Reset ps status display, so as no information related to recovery * Reset ps status display, so as no information related to recovery shows
* shows up. * up.
*/ */
set_ps_display(""); set_ps_display("");
@ -5596,9 +5596,9 @@ StartupXLOG(void)
if (!XLogRecPtrIsInvalid(missingContrecPtr)) if (!XLogRecPtrIsInvalid(missingContrecPtr))
{ {
/* /*
* We should only have a missingContrecPtr if we're not switching to * We should only have a missingContrecPtr if we're not switching to a
* a new timeline. When a timeline switch occurs, WAL is copied from * new timeline. When a timeline switch occurs, WAL is copied from the
* the old timeline to the new only up to the end of the last complete * old timeline to the new only up to the end of the last complete
* record, so there can't be an incomplete WAL record that we need to * record, so there can't be an incomplete WAL record that we need to
* disregard. * disregard.
*/ */
@ -8494,7 +8494,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
*/ */
if (rllen > datadirpathlen && if (rllen > datadirpathlen &&
strncmp(linkpath, DataDir, datadirpathlen) == 0 && strncmp(linkpath, DataDir, datadirpathlen) == 0 &&
IS_DIR_SEP(linkpath[datadirpathlen])) IS_DIR_SEP(linkpath[datadirpathlen]))
relpath = pstrdup(linkpath + datadirpathlen + 1); relpath = pstrdup(linkpath + datadirpathlen + 1);
/* /*

View File

@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
* *
* XLogReader machinery is only able to handle records up to a certain * XLogReader machinery is only able to handle records up to a certain
* size (ignoring machine resource limitations), so make sure that we will * size (ignoring machine resource limitations), so make sure that we will
* not emit records larger than the sizes advertised to be supported. * not emit records larger than the sizes advertised to be supported. This
* This cap is based on DecodeXLogRecordRequiredSpace(). * cap is based on DecodeXLogRecordRequiredSpace().
*/ */
if (total_len >= XLogRecordMaxSize) if (total_len >= XLogRecordMaxSize)
ereport(ERROR, ereport(ERROR,

View File

@ -569,7 +569,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
if (record_type == XLOG_DBASE_CREATE_FILE_COPY) if (record_type == XLOG_DBASE_CREATE_FILE_COPY)
{ {
xl_dbase_create_file_copy_rec *xlrec = xl_dbase_create_file_copy_rec *xlrec =
(xl_dbase_create_file_copy_rec *) record->main_data; (xl_dbase_create_file_copy_rec *) record->main_data;
RelFileLocator rlocator = RelFileLocator rlocator =
{InvalidOid, xlrec->db_id, InvalidRelFileNumber}; {InvalidOid, xlrec->db_id, InvalidRelFileNumber};
@ -596,7 +596,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
if (record_type == XLOG_SMGR_CREATE) if (record_type == XLOG_SMGR_CREATE)
{ {
xl_smgr_create *xlrec = (xl_smgr_create *) xl_smgr_create *xlrec = (xl_smgr_create *)
record->main_data; record->main_data;
if (xlrec->forkNum == MAIN_FORKNUM) if (xlrec->forkNum == MAIN_FORKNUM)
{ {
@ -624,7 +624,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
else if (record_type == XLOG_SMGR_TRUNCATE) else if (record_type == XLOG_SMGR_TRUNCATE)
{ {
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) xl_smgr_truncate *xlrec = (xl_smgr_truncate *)
record->main_data; record->main_data;
/* /*
* Don't consider prefetching anything in the truncated * Don't consider prefetching anything in the truncated

View File

@ -282,7 +282,7 @@ XLogRecPtr
XLogReleasePreviousRecord(XLogReaderState *state) XLogReleasePreviousRecord(XLogReaderState *state)
{ {
DecodedXLogRecord *record; DecodedXLogRecord *record;
XLogRecPtr next_lsn; XLogRecPtr next_lsn;
if (!state->record) if (!state->record)
return InvalidXLogRecPtr; return InvalidXLogRecPtr;

View File

@ -3215,7 +3215,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
XLogRecPtr targetRecPtr, char *readBuf) XLogRecPtr targetRecPtr, char *readBuf)
{ {
XLogPageReadPrivate *private = XLogPageReadPrivate *private =
(XLogPageReadPrivate *) xlogreader->private_data; (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode; int emode = private->emode;
uint32 targetPageOff; uint32 targetPageOff;
XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY; XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;

View File

@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename,
* *
* There's no guarantee that this will actually * There's no guarantee that this will actually
* happen, though: the torn write could take an * happen, though: the torn write could take an
* arbitrarily long time to complete. Retrying multiple * arbitrarily long time to complete. Retrying
* times wouldn't fix this problem, either, though * multiple times wouldn't fix this problem, either,
* it would reduce the chances of it happening in * though it would reduce the chances of it happening
* practice. The only real fix here seems to be to * in practice. The only real fix here seems to be to
* have some kind of interlock that allows us to wait * have some kind of interlock that allows us to wait
* until we can be certain that no write to the block * until we can be certain that no write to the block
* is in progress. Since we don't have any such thing * is in progress. Since we don't have any such thing

View File

@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tupdesc = CreateTemplateTupleDesc(2); tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0); TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
/* /*
* int8 may seem like a surprising data type for this, but in theory int4 * int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned. * would not be wide enough for this, as TimeLineID is unsigned.
@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual); tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
/* Data row */ /* Data row */
values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
values[1] = Int64GetDatum(tli); values[1] = Int64GetDatum(tli);
do_tup_output(tstate, values, nulls); do_tup_output(tstate, values, nulls);

View File

@ -28,25 +28,25 @@ sub ParseHeader
# There are a few types which are given one name in the C source, but a # There are a few types which are given one name in the C source, but a
# different name at the SQL level. These are enumerated here. # different name at the SQL level. These are enumerated here.
my %RENAME_ATTTYPE = ( my %RENAME_ATTTYPE = (
'int16' => 'int2', 'int16' => 'int2',
'int32' => 'int4', 'int32' => 'int4',
'int64' => 'int8', 'int64' => 'int8',
'Oid' => 'oid', 'Oid' => 'oid',
'NameData' => 'name', 'NameData' => 'name',
'TransactionId' => 'xid', 'TransactionId' => 'xid',
'XLogRecPtr' => 'pg_lsn'); 'XLogRecPtr' => 'pg_lsn');
my %catalog; my %catalog;
my $declaring_attributes = 0; my $declaring_attributes = 0;
my $is_varlen = 0; my $is_varlen = 0;
my $is_client_code = 0; my $is_client_code = 0;
$catalog{columns} = []; $catalog{columns} = [];
$catalog{toasting} = []; $catalog{toasting} = [];
$catalog{indexing} = []; $catalog{indexing} = [];
$catalog{other_oids} = []; $catalog{other_oids} = [];
$catalog{foreign_keys} = []; $catalog{foreign_keys} = [];
$catalog{client_code} = []; $catalog{client_code} = [];
open(my $ifh, '<', $input_file) || die "$input_file: $!"; open(my $ifh, '<', $input_file) || die "$input_file: $!";
@ -102,10 +102,10 @@ sub ParseHeader
{ {
push @{ $catalog{toasting} }, push @{ $catalog{toasting} },
{ {
parent_table => $1, parent_table => $1,
toast_oid => $2, toast_oid => $2,
toast_index_oid => $3, toast_index_oid => $3,
toast_oid_macro => $4, toast_oid_macro => $4,
toast_index_oid_macro => $5 toast_index_oid_macro => $5
}; };
} }
@ -116,11 +116,11 @@ sub ParseHeader
push @{ $catalog{indexing} }, push @{ $catalog{indexing} },
{ {
is_unique => $1 ? 1 : 0, is_unique => $1 ? 1 : 0,
is_pkey => $2 ? 1 : 0, is_pkey => $2 ? 1 : 0,
index_name => $3, index_name => $3,
index_oid => $4, index_oid => $4,
index_oid_macro => $5, index_oid_macro => $5,
index_decl => $6 index_decl => $6
}; };
} }
elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/) elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
@ -128,7 +128,7 @@ sub ParseHeader
push @{ $catalog{other_oids} }, push @{ $catalog{other_oids} },
{ {
other_name => $1, other_name => $1,
other_oid => $2 other_oid => $2
}; };
} }
elsif ( elsif (
@ -138,16 +138,16 @@ sub ParseHeader
push @{ $catalog{foreign_keys} }, push @{ $catalog{foreign_keys} },
{ {
is_array => $1 ? 1 : 0, is_array => $1 ? 1 : 0,
is_opt => $2 ? 1 : 0, is_opt => $2 ? 1 : 0,
fk_cols => $3, fk_cols => $3,
pk_table => $4, pk_table => $4,
pk_cols => $5 pk_cols => $5
}; };
} }
elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/) elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/)
{ {
$catalog{catname} = $1; $catalog{catname} = $1;
$catalog{relation_oid} = $2; $catalog{relation_oid} = $2;
$catalog{relation_oid_macro} = $3; $catalog{relation_oid_macro} = $3;
$catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : ''; $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : '';
@ -155,15 +155,15 @@ sub ParseHeader
/BKI_SHARED_RELATION/ ? ' shared_relation' : ''; /BKI_SHARED_RELATION/ ? ' shared_relation' : '';
if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/) if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/)
{ {
$catalog{rowtype_oid} = $1; $catalog{rowtype_oid} = $1;
$catalog{rowtype_oid_clause} = " rowtype_oid $1"; $catalog{rowtype_oid_clause} = " rowtype_oid $1";
$catalog{rowtype_oid_macro} = $2; $catalog{rowtype_oid_macro} = $2;
} }
else else
{ {
$catalog{rowtype_oid} = ''; $catalog{rowtype_oid} = '';
$catalog{rowtype_oid_clause} = ''; $catalog{rowtype_oid_clause} = '';
$catalog{rowtype_oid_macro} = ''; $catalog{rowtype_oid_macro} = '';
} }
$catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0; $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0;
$declaring_attributes = 1; $declaring_attributes = 1;
@ -209,8 +209,8 @@ sub ParseHeader
$atttype = '_' . $atttype; $atttype = '_' . $atttype;
} }
$column{type} = $atttype; $column{type} = $atttype;
$column{name} = $attname; $column{name} = $attname;
$column{is_varlen} = 1 if $is_varlen; $column{is_varlen} = 1 if $is_varlen;
foreach my $attopt (@attopts) foreach my $attopt (@attopts)
@ -243,14 +243,14 @@ sub ParseHeader
# BKI_LOOKUP implicitly makes an FK reference # BKI_LOOKUP implicitly makes an FK reference
push @{ $catalog{foreign_keys} }, push @{ $catalog{foreign_keys} },
{ {
is_array => is_array => (
($atttype eq 'oidvector' || $atttype eq '_oid') $atttype eq 'oidvector' || $atttype eq '_oid')
? 1 ? 1
: 0, : 0,
is_opt => $column{lookup_opt}, is_opt => $column{lookup_opt},
fk_cols => $attname, fk_cols => $attname,
pk_table => $column{lookup}, pk_table => $column{lookup},
pk_cols => 'oid' pk_cols => 'oid'
}; };
} }
else else
@ -285,7 +285,7 @@ sub ParseData
$input_file =~ /(\w+)\.dat$/ $input_file =~ /(\w+)\.dat$/
or die "Input file $input_file needs to be a .dat file.\n"; or die "Input file $input_file needs to be a .dat file.\n";
my $catname = $1; my $catname = $1;
my $data = []; my $data = [];
if ($preserve_formatting) if ($preserve_formatting)
{ {
@ -433,7 +433,7 @@ sub AddDefaultValues
sub GenerateArrayTypes sub GenerateArrayTypes
{ {
my $pgtype_schema = shift; my $pgtype_schema = shift;
my $types = shift; my $types = shift;
my @array_types; my @array_types;
foreach my $elem_type (@$types) foreach my $elem_type (@$types)
@ -444,9 +444,9 @@ sub GenerateArrayTypes
my %array_type; my %array_type;
# Set up metadata fields for array type. # Set up metadata fields for array type.
$array_type{oid} = $elem_type->{array_type_oid}; $array_type{oid} = $elem_type->{array_type_oid};
$array_type{autogenerated} = 1; $array_type{autogenerated} = 1;
$array_type{line_number} = $elem_type->{line_number}; $array_type{line_number} = $elem_type->{line_number};
# Set up column values derived from the element type. # Set up column values derived from the element type.
$array_type{typname} = '_' . $elem_type->{typname}; $array_type{typname} = '_' . $elem_type->{typname};
@ -499,8 +499,8 @@ sub GenerateArrayTypes
sub RenameTempFile sub RenameTempFile
{ {
my $final_name = shift; my $final_name = shift;
my $extension = shift; my $extension = shift;
my $temp_name = $final_name . $extension; my $temp_name = $final_name . $extension;
if (-f $final_name if (-f $final_name
&& compare($temp_name, $final_name) == 0) && compare($temp_name, $final_name) == 0)

View File

@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)); result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
/* /*
* Check if ACL_MAINTAIN is being checked and, if so, and not already set as * Check if ACL_MAINTAIN is being checked and, if so, and not already set
* part of the result, then check if the user is a member of the * as part of the result, then check if the user is a member of the
* pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH * pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
* MATERIALIZED VIEW, and REINDEX on all relations. * MATERIALIZED VIEW, and REINDEX on all relations.
*/ */

View File

@ -29,12 +29,12 @@ my $include_path;
my $num_errors = 0; my $num_errors = 0;
GetOptions( GetOptions(
'output:s' => \$output_path, 'output:s' => \$output_path,
'set-version:s' => \$major_version, 'set-version:s' => \$major_version,
'include-path:s' => \$include_path) || usage(); 'include-path:s' => \$include_path) || usage();
# Sanity check arguments. # Sanity check arguments.
die "No input files.\n" unless @ARGV; die "No input files.\n" unless @ARGV;
die "--set-version must be specified.\n" unless $major_version; die "--set-version must be specified.\n" unless $major_version;
die "Invalid version string: $major_version\n" die "Invalid version string: $major_version\n"
unless $major_version =~ /^\d+$/; unless $major_version =~ /^\d+$/;
@ -67,7 +67,7 @@ foreach my $header (@ARGV)
my $catalog = Catalog::ParseHeader($header); my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname}; my $catname = $catalog->{catname};
my $schema = $catalog->{columns}; my $schema = $catalog->{columns};
if (defined $catname) if (defined $catname)
{ {
@ -100,9 +100,9 @@ foreach my $header (@ARGV)
if (defined $row->{descr}) if (defined $row->{descr})
{ {
my %descr = ( my %descr = (
objoid => $row->{oid}, objoid => $row->{oid},
classoid => $catalog->{relation_oid}, classoid => $catalog->{relation_oid},
objsubid => 0, objsubid => 0,
description => $row->{descr}); description => $row->{descr});
if ($catalog->{shared_relation}) if ($catalog->{shared_relation})
@ -364,7 +364,7 @@ open(my $ef, '<', $encfile) || die "$encfile: $!";
# We're parsing an enum, so start with 0 and increment # We're parsing an enum, so start with 0 and increment
# every time we find an enum member. # every time we find an enum member.
my $encid = 0; my $encid = 0;
my $collect_encodings = 0; my $collect_encodings = 0;
while (<$ef>) while (<$ef>)
{ {
@ -387,27 +387,27 @@ close $ef;
# Map lookup name to the corresponding hash table. # Map lookup name to the corresponding hash table.
my %lookup_kind = ( my %lookup_kind = (
pg_am => \%amoids, pg_am => \%amoids,
pg_authid => \%authidoids, pg_authid => \%authidoids,
pg_class => \%classoids, pg_class => \%classoids,
pg_collation => \%collationoids, pg_collation => \%collationoids,
pg_language => \%langoids, pg_language => \%langoids,
pg_namespace => \%namespaceoids, pg_namespace => \%namespaceoids,
pg_opclass => \%opcoids, pg_opclass => \%opcoids,
pg_operator => \%operoids, pg_operator => \%operoids,
pg_opfamily => \%opfoids, pg_opfamily => \%opfoids,
pg_proc => \%procoids, pg_proc => \%procoids,
pg_tablespace => \%tablespaceoids, pg_tablespace => \%tablespaceoids,
pg_ts_config => \%tsconfigoids, pg_ts_config => \%tsconfigoids,
pg_ts_dict => \%tsdictoids, pg_ts_dict => \%tsdictoids,
pg_ts_parser => \%tsparseroids, pg_ts_parser => \%tsparseroids,
pg_ts_template => \%tstemplateoids, pg_ts_template => \%tstemplateoids,
pg_type => \%typeoids, pg_type => \%typeoids,
encoding => \%encids); encoding => \%encids);
# Open temp files # Open temp files
my $tmpext = ".tmp$$"; my $tmpext = ".tmp$$";
my $bkifile = $output_path . 'postgres.bki'; my $bkifile = $output_path . 'postgres.bki';
open my $bki, '>', $bkifile . $tmpext open my $bki, '>', $bkifile . $tmpext
or die "can't open $bkifile$tmpext: $!"; or die "can't open $bkifile$tmpext: $!";
@ -600,7 +600,7 @@ EOM
# each element of the array as per the lookup rule. # each element of the array as per the lookup rule.
if ($column->{lookup}) if ($column->{lookup})
{ {
my $lookup = $lookup_kind{ $column->{lookup} }; my $lookup = $lookup_kind{ $column->{lookup} };
my $lookup_opt = $column->{lookup_opt}; my $lookup_opt = $column->{lookup_opt};
my @lookupnames; my @lookupnames;
my @lookupoids; my @lookupoids;
@ -790,7 +790,7 @@ foreach my $catname (@catnames)
printf $fk_info printf $fk_info
"\t{ /* %s */ %s, /* %s */ %s, \"{%s}\", \"{%s}\", %s, %s},\n", "\t{ /* %s */ %s, /* %s */ %s, \"{%s}\", \"{%s}\", %s, %s},\n",
$catname, $catalog->{relation_oid}, $catname, $catalog->{relation_oid},
$pktabname, $catalogs{$pktabname}->{relation_oid}, $pktabname, $catalogs{$pktabname}->{relation_oid},
$fkinfo->{fk_cols}, $fkinfo->{fk_cols},
$fkinfo->{pk_cols}, $fkinfo->{pk_cols},
@ -809,9 +809,9 @@ close $fk_info;
close $constraints; close $constraints;
# Finally, rename the completed files into place. # Finally, rename the completed files into place.
Catalog::RenameTempFile($bkifile, $tmpext); Catalog::RenameTempFile($bkifile, $tmpext);
Catalog::RenameTempFile($schemafile, $tmpext); Catalog::RenameTempFile($schemafile, $tmpext);
Catalog::RenameTempFile($fk_info_file, $tmpext); Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext); Catalog::RenameTempFile($constraints_file, $tmpext);
exit($num_errors != 0 ? 1 : 0); exit($num_errors != 0 ? 1 : 0);
@ -845,13 +845,13 @@ sub gen_pg_attribute
push @tables_needing_macros, $table_name; push @tables_needing_macros, $table_name;
# Generate entries for user attributes. # Generate entries for user attributes.
my $attnum = 0; my $attnum = 0;
my $priorfixedwidth = 1; my $priorfixedwidth = 1;
foreach my $attr (@{ $table->{columns} }) foreach my $attr (@{ $table->{columns} })
{ {
$attnum++; $attnum++;
my %row; my %row;
$row{attnum} = $attnum; $row{attnum} = $attnum;
$row{attrelid} = $table->{relation_oid}; $row{attrelid} = $table->{relation_oid};
morph_row_for_pgattr(\%row, $schema, $attr, $priorfixedwidth); morph_row_for_pgattr(\%row, $schema, $attr, $priorfixedwidth);
@ -877,18 +877,18 @@ sub gen_pg_attribute
{ {
$attnum = 0; $attnum = 0;
my @SYS_ATTRS = ( my @SYS_ATTRS = (
{ name => 'ctid', type => 'tid' }, { name => 'ctid', type => 'tid' },
{ name => 'xmin', type => 'xid' }, { name => 'xmin', type => 'xid' },
{ name => 'cmin', type => 'cid' }, { name => 'cmin', type => 'cid' },
{ name => 'xmax', type => 'xid' }, { name => 'xmax', type => 'xid' },
{ name => 'cmax', type => 'cid' }, { name => 'cmax', type => 'cid' },
{ name => 'tableoid', type => 'oid' }); { name => 'tableoid', type => 'oid' });
foreach my $attr (@SYS_ATTRS) foreach my $attr (@SYS_ATTRS)
{ {
$attnum--; $attnum--;
my %row; my %row;
$row{attnum} = $attnum; $row{attnum} = $attnum;
$row{attrelid} = $table->{relation_oid}; $row{attrelid} = $table->{relation_oid};
$row{attstattarget} = '0'; $row{attstattarget} = '0';
morph_row_for_pgattr(\%row, $schema, $attr, 1); morph_row_for_pgattr(\%row, $schema, $attr, 1);
@ -916,10 +916,10 @@ sub morph_row_for_pgattr
# Copy the type data from pg_type, and add some type-dependent items # Copy the type data from pg_type, and add some type-dependent items
my $type = $types{$atttype}; my $type = $types{$atttype};
$row->{atttypid} = $type->{oid}; $row->{atttypid} = $type->{oid};
$row->{attlen} = $type->{typlen}; $row->{attlen} = $type->{typlen};
$row->{attbyval} = $type->{typbyval}; $row->{attbyval} = $type->{typbyval};
$row->{attalign} = $type->{typalign}; $row->{attalign} = $type->{typalign};
$row->{attstorage} = $type->{typstorage}; $row->{attstorage} = $type->{typstorage};
# set attndims if it's an array type # set attndims if it's an array type
@ -946,7 +946,7 @@ sub morph_row_for_pgattr
# At this point the width of type name is still symbolic, # At this point the width of type name is still symbolic,
# so we need a special test. # so we need a special test.
$row->{attnotnull} = $row->{attnotnull} =
$row->{attlen} eq 'NAMEDATALEN' ? 't' $row->{attlen} eq 'NAMEDATALEN' ? 't'
: $row->{attlen} > 0 ? 't' : $row->{attlen} > 0 ? 't'
: 'f'; : 'f';
} }
@ -962,15 +962,15 @@ sub morph_row_for_pgattr
# Write an entry to postgres.bki. # Write an entry to postgres.bki.
sub print_bki_insert sub print_bki_insert
{ {
my $row = shift; my $row = shift;
my $schema = shift; my $schema = shift;
my @bki_values; my @bki_values;
foreach my $column (@$schema) foreach my $column (@$schema)
{ {
my $attname = $column->{name}; my $attname = $column->{name};
my $atttype = $column->{type}; my $atttype = $column->{type};
my $bki_value = $row->{$attname}; my $bki_value = $row->{$attname};
# Fold backslash-zero to empty string if it's the entire string, # Fold backslash-zero to empty string if it's the entire string,
@ -1002,7 +1002,7 @@ sub print_bki_insert
# quite identical, to the corresponding values in postgres.bki. # quite identical, to the corresponding values in postgres.bki.
sub morph_row_for_schemapg sub morph_row_for_schemapg
{ {
my $row = shift; my $row = shift;
my $pgattr_schema = shift; my $pgattr_schema = shift;
foreach my $column (@$pgattr_schema) foreach my $column (@$pgattr_schema)
@ -1027,7 +1027,7 @@ sub morph_row_for_schemapg
# don't change. # don't change.
elsif ($atttype eq 'bool') elsif ($atttype eq 'bool')
{ {
$row->{$attname} = 'true' if $row->{$attname} eq 't'; $row->{$attname} = 'true' if $row->{$attname} eq 't';
$row->{$attname} = 'false' if $row->{$attname} eq 'f'; $row->{$attname} = 'false' if $row->{$attname} eq 'f';
} }
@ -1089,7 +1089,7 @@ sub form_pg_type_symbol
# Skip for rowtypes of bootstrap catalogs, since they have their # Skip for rowtypes of bootstrap catalogs, since they have their
# own naming convention defined elsewhere. # own naming convention defined elsewhere.
return return
if $typename eq 'pg_type' if $typename eq 'pg_type'
or $typename eq 'pg_proc' or $typename eq 'pg_proc'
or $typename eq 'pg_attribute' or $typename eq 'pg_attribute'
or $typename eq 'pg_class'; or $typename eq 'pg_class';

View File

@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple,
#endif /* USE_ASSERT_CHECKING */ #endif /* USE_ASSERT_CHECKING */
/* /*
* Skip insertions into non-summarizing indexes if we only need * Skip insertions into non-summarizing indexes if we only need to
* to update summarizing indexes. * update summarizing indexes.
*/ */
if (onlySummarized && !indexInfo->ii_Summarizing) if (onlySummarized && !indexInfo->ii_Summarizing)
continue; continue;

View File

@ -3842,7 +3842,7 @@ recomputeNamespacePath(void)
if (OidIsValid(namespaceId) && if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) && !list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid, object_aclcheck(NamespaceRelationId, namespaceId, roleid,
ACL_USAGE) == ACLCHECK_OK && ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false)) InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId); oidlist = lappend_oid(oidlist, namespaceId);
} }
@ -3870,7 +3870,7 @@ recomputeNamespacePath(void)
if (OidIsValid(namespaceId) && if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) && !list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid, object_aclcheck(NamespaceRelationId, namespaceId, roleid,
ACL_USAGE) == ACLCHECK_OK && ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false)) InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId); oidlist = lappend_oid(oidlist, namespaceId);
} }
@ -4006,7 +4006,7 @@ InitTempTableNamespace(void)
* temp table creation request is made by someone with appropriate rights. * temp table creation request is made by someone with appropriate rights.
*/ */
if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
ACL_CREATE_TEMP) != ACLCHECK_OK) ACL_CREATE_TEMP) != ACLCHECK_OK)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create temporary tables in database \"%s\"", errmsg("permission denied to create temporary tables in database \"%s\"",

View File

@ -625,7 +625,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
/* not in catalogs, different from operator, so make shell */ /* not in catalogs, different from operator, so make shell */
aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(), aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(otherNamespace)); get_namespace_name(otherNamespace));

View File

@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
/* FALLTHROUGH */ /* FALLTHROUGH */
case SHARED_DEPENDENCY_OWNER: case SHARED_DEPENDENCY_OWNER:
/* /*
* Save it for deletion below, if it's a local object or a * Save it for deletion below, if it's a local object or a
* role grant. Other shared objects, such as databases, * role grant. Other shared objects, such as databases,

View File

@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
if (OidIsValid(namespaceId)) if (OidIsValid(namespaceId))
{ {
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId)); get_namespace_name(namespaceId));
@ -1035,7 +1035,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId, aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId,
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId)); get_namespace_name(namespaceId));

View File

@ -270,8 +270,8 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e
*/ */
if (!IsBinaryUpgrade) if (!IsBinaryUpgrade)
{ {
char *langtag = icu_language_tag(colliculocale, char *langtag = icu_language_tag(colliculocale,
icu_validation_level); icu_validation_level);
if (langtag && strcmp(colliculocale, langtag) != 0) if (langtag && strcmp(colliculocale, langtag) != 0)
{ {
@ -476,17 +476,18 @@ AlterCollation(AlterCollationStmt *stmt)
Datum Datum
pg_collation_actual_version(PG_FUNCTION_ARGS) pg_collation_actual_version(PG_FUNCTION_ARGS)
{ {
Oid collid = PG_GETARG_OID(0); Oid collid = PG_GETARG_OID(0);
char provider; char provider;
char *locale; char *locale;
char *version; char *version;
Datum datum; Datum datum;
if (collid == DEFAULT_COLLATION_OID) if (collid == DEFAULT_COLLATION_OID)
{ {
/* retrieve from pg_database */ /* retrieve from pg_database */
HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId)); HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
if (!HeapTupleIsValid(dbtup)) if (!HeapTupleIsValid(dbtup))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT), (errcode(ERRCODE_UNDEFINED_OBJECT),
@ -506,7 +507,8 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
{ {
/* retrieve from pg_collation */ /* retrieve from pg_collation */
HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
if (!HeapTupleIsValid(colltp)) if (!HeapTupleIsValid(colltp))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT), (errcode(ERRCODE_UNDEFINED_OBJECT),
@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid,
Oid collid; Oid collid;
/* /*
* Some systems have locale names that don't consist entirely of * Some systems have locale names that don't consist entirely of ASCII
* ASCII letters (such as "bokm&aring;l" or "fran&ccedil;ais"). * letters (such as "bokm&aring;l" or "fran&ccedil;ais"). This is pretty
* This is pretty silly, since we need the locale itself to * silly, since we need the locale itself to interpret the non-ASCII
* interpret the non-ASCII characters. We can't do much with * characters. We can't do much with those, so we filter them out.
* those, so we filter them out.
*/ */
if (!pg_is_ascii(locale)) if (!pg_is_ascii(locale))
{ {
@ -681,19 +682,18 @@ create_collation_from_locale(const char *locale, int nspid,
return -1; return -1;
} }
if (enc == PG_SQL_ASCII) if (enc == PG_SQL_ASCII)
return -1; /* C/POSIX are already in the catalog */ return -1; /* C/POSIX are already in the catalog */
/* count valid locales found in operating system */ /* count valid locales found in operating system */
(*nvalidp)++; (*nvalidp)++;
/* /*
* Create a collation named the same as the locale, but quietly * Create a collation named the same as the locale, but quietly doing
* doing nothing if it already exists. This is the behavior we * nothing if it already exists. This is the behavior we need even at
* need even at initdb time, because some versions of "locale -a" * initdb time, because some versions of "locale -a" can report the same
* can report the same locale name more than once. And it's * locale name more than once. And it's convenient for later import runs,
* convenient for later import runs, too, since you just about * too, since you just about always want to add on new locales without a
* always want to add on new locales without a lot of chatter * lot of chatter about existing ones.
* about existing ones.
*/ */
collid = CollationCreate(locale, nspid, GetUserId(), collid = CollationCreate(locale, nspid, GetUserId(),
COLLPROVIDER_LIBC, true, enc, COLLPROVIDER_LIBC, true, enc,
@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
param.nvalidp = &nvalid; param.nvalidp = &nvalid;
/* /*
* Enumerate the locales that are either installed on or supported * Enumerate the locales that are either installed on or supported by
* by the OS. * the OS.
*/ */
if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL, if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
(LPARAM) &param, NULL)) (LPARAM) &param, NULL))

View File

@ -259,7 +259,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
List *rlocatorlist = NIL; List *rlocatorlist = NIL;
LockRelId relid; LockRelId relid;
Snapshot snapshot; Snapshot snapshot;
SMgrRelation smgr; SMgrRelation smgr;
BufferAccessStrategy bstrategy; BufferAccessStrategy bstrategy;
/* Get pg_class relfilenumber. */ /* Get pg_class relfilenumber. */
@ -1065,8 +1065,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
*/ */
if (!IsBinaryUpgrade && dbiculocale != src_iculocale) if (!IsBinaryUpgrade && dbiculocale != src_iculocale)
{ {
char *langtag = icu_language_tag(dbiculocale, char *langtag = icu_language_tag(dbiculocale,
icu_validation_level); icu_validation_level);
if (langtag && strcmp(dbiculocale, langtag) != 0) if (langtag && strcmp(dbiculocale, langtag) != 0)
{ {
@ -1219,7 +1219,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
dst_deftablespace = get_tablespace_oid(tablespacename, false); dst_deftablespace = get_tablespace_oid(tablespacename, false);
/* check permissions */ /* check permissions */
aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(), aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
tablespacename); tablespacename);
@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
* If we're going to be reading data for the to-be-created database into * If we're going to be reading data for the to-be-created database into
* shared_buffers, take a lock on it. Nobody should know that this * shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an * database exists yet, but it's good to maintain the invariant that an
* AccessExclusiveLock on the database is sufficient to drop all * AccessExclusiveLock on the database is sufficient to drop all of its
* of its buffers without worrying about more being read later. * buffers without worrying about more being read later.
* *
* Note that we need to do this before entering the * Note that we need to do this before entering the
* PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
@ -1933,7 +1933,7 @@ movedb(const char *dbname, const char *tblspcname)
* Permission checks * Permission checks
*/ */
aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(), aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
tblspcname); tblspcname);
@ -3110,7 +3110,7 @@ dbase_redo(XLogReaderState *record)
if (info == XLOG_DBASE_CREATE_FILE_COPY) if (info == XLOG_DBASE_CREATE_FILE_COPY)
{ {
xl_dbase_create_file_copy_rec *xlrec = xl_dbase_create_file_copy_rec *xlrec =
(xl_dbase_create_file_copy_rec *) XLogRecGetData(record); (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
char *src_path; char *src_path;
char *dst_path; char *dst_path;
char *parent_path; char *parent_path;
@ -3182,7 +3182,7 @@ dbase_redo(XLogReaderState *record)
else if (info == XLOG_DBASE_CREATE_WAL_LOG) else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{ {
xl_dbase_create_wal_log_rec *xlrec = xl_dbase_create_wal_log_rec *xlrec =
(xl_dbase_create_wal_log_rec *) XLogRecGetData(record); (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
char *dbpath; char *dbpath;
char *parent_path; char *parent_path;

View File

@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_TABLE: case OBJECT_TABLE:
case OBJECT_TABLESPACE: case OBJECT_TABLESPACE:
case OBJECT_VIEW: case OBJECT_VIEW:
/* /*
* These are handled elsewhere, so if someone gets here the code * These are handled elsewhere, so if someone gets here the code
* is probably wrong or should be revisited. * is probably wrong or should be revisited.

View File

@ -1523,7 +1523,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
{ {
BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan; BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
const char *indexname = const char *indexname =
explain_get_index_name(bitmapindexscan->indexid); explain_get_index_name(bitmapindexscan->indexid);
if (es->format == EXPLAIN_FORMAT_TEXT) if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, " on %s", appendStringInfo(es->str, " on %s",
@ -3008,7 +3008,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
for (n = 0; n < incrsortstate->shared_info->num_workers; n++) for (n = 0; n < incrsortstate->shared_info->num_workers; n++)
{ {
IncrementalSortInfo *incsort_info = IncrementalSortInfo *incsort_info =
&incrsortstate->shared_info->sinfo[n]; &incrsortstate->shared_info->sinfo[n];
/* /*
* If a worker hasn't processed any sort groups at all, then * If a worker hasn't processed any sort groups at all, then
@ -4212,7 +4212,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es)
{ {
ListCell *cell; ListCell *cell;
const char *label = const char *label =
(list_length(css->custom_ps) != 1 ? "children" : "child"); (list_length(css->custom_ps) != 1 ? "children" : "child");
foreach(cell, css->custom_ps) foreach(cell, css->custom_ps)
ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es); ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);

View File

@ -151,7 +151,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
namespaceId = QualifiedNameGetCreationNamespace(returnType->names, namespaceId = QualifiedNameGetCreationNamespace(returnType->names,
&typname); &typname);
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId)); get_namespace_name(namespaceId));
@ -2117,7 +2117,7 @@ ExecuteDoStmt(ParseState *pstate, DoStmt *stmt, bool atomic)
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(), aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(),
ACL_USAGE); ACL_USAGE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE, aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(languageStruct->lanname)); NameStr(languageStruct->lanname));

View File

@ -748,7 +748,7 @@ DefineIndex(Oid relationId,
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid, aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId)); get_namespace_name(namespaceId));
@ -780,7 +780,7 @@ DefineIndex(Oid relationId,
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid, aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId)); get_tablespace_name(tablespaceId));
@ -2708,7 +2708,7 @@ ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel)
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid, aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid,
GetUserId(), ACL_CREATE); GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params.tablespaceOid)); get_tablespace_name(params.tablespaceOid));
@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
/* /*
* The table can be reindexed if the user has been granted MAINTAIN on * The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a * the table or one of its partition ancestors or the user is a
* superuser, the table owner, or the database/schema owner (but in the * superuser, the table owner, or the database/schema owner (but in
* latter case, only if it's not a shared relation). pg_class_aclcheck * the latter case, only if it's not a shared relation).
* includes the superuser case, and depending on objectKind we already * pg_class_aclcheck includes the superuser case, and depending on
* know that the user has permission to run REINDEX on this database or * objectKind we already know that the user has permission to run
* schema per the permission checks at the beginning of this routine. * REINDEX on this database or schema per the permission checks at the
* beginning of this routine.
*/ */
if (classtuple->relisshared && if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK && pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
@ -3312,7 +3313,7 @@ ReindexMultipleInternal(List *relids, ReindexParams *params)
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid, aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid,
GetUserId(), ACL_CREATE); GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params->tablespaceOid)); get_tablespace_name(params->tablespaceOid));

View File

@ -400,7 +400,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId)
* no special case for them. * no special case for them.
*/ */
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_DATABASE, aclcheck_error(aclresult, OBJECT_DATABASE,
get_database_name(MyDatabaseId)); get_database_name(MyDatabaseId));

View File

@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
/* /*
* We don't want to allow unprivileged users to be able to trigger attempts * We don't want to allow unprivileged users to be able to trigger
* to access arbitrary network destinations, so require the user to have * attempts to access arbitrary network destinations, so require the user
* been specifically authorized to create subscriptions. * to have been specifically authorized to create subscriptions.
*/ */
if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION)) if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
ereport(ERROR, ereport(ERROR,
@ -631,10 +631,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
* exempt a subscription from this requirement. * exempt a subscription from this requirement.
*/ */
if (!opts.passwordrequired && !superuser_arg(owner)) if (!opts.passwordrequired && !superuser_arg(owner))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("password_required=false is superuser-only"), errmsg("password_required=false is superuser-only"),
errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* /*
* If built with appropriate switch, whine when regression-testing * If built with appropriate switch, whine when regression-testing
@ -1113,8 +1113,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
if (!sub->passwordrequired && !superuser()) if (!sub->passwordrequired && !superuser())
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("password_required=false is superuser-only"), errmsg("password_required=false is superuser-only"),
errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Lock the subscription so nobody else can do anything with it. */ /* Lock the subscription so nobody else can do anything with it. */
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
@ -1827,8 +1827,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
if (!form->subpasswordrequired && !superuser()) if (!form->subpasswordrequired && !superuser())
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("password_required=false is superuser-only"), errmsg("password_required=false is superuser-only"),
errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Must be able to become new owner */ /* Must be able to become new owner */
check_can_set_role(GetUserId(), newOwnerId); check_can_set_role(GetUserId(), newOwnerId);
@ -1837,8 +1837,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
* current owner must have CREATE on database * current owner must have CREATE on database
* *
* This is consistent with how ALTER SCHEMA ... OWNER TO works, but some * This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
* other object types behave differently (e.g. you can't give a table to * other object types behave differently (e.g. you can't give a table to a
* a user who lacks CREATE privileges on a schema). * user who lacks CREATE privileges on a schema).
*/ */
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
GetUserId(), ACL_CREATE); GetUserId(), ACL_CREATE);

View File

@ -806,7 +806,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(), aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId)); get_tablespace_name(tablespaceId));
@ -1931,7 +1931,7 @@ ExecuteTruncateGuts(List *explicit_rels,
resultRelInfo = resultRelInfos; resultRelInfo = resultRelInfos;
foreach(cell, rels) foreach(cell, rels)
{ {
UserContext ucxt; UserContext ucxt;
if (run_as_table_owner) if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
@ -2143,7 +2143,7 @@ ExecuteTruncateGuts(List *explicit_rels,
resultRelInfo = resultRelInfos; resultRelInfo = resultRelInfos;
foreach(cell, rels) foreach(cell, rels)
{ {
UserContext ucxt; UserContext ucxt;
if (run_as_table_owner) if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
@ -2635,7 +2635,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
if (CompressionMethodIsValid(attribute->attcompression)) if (CompressionMethodIsValid(attribute->attcompression))
{ {
const char *compression = const char *compression =
GetCompressionMethodName(attribute->attcompression); GetCompressionMethodName(attribute->attcompression);
if (def->compression == NULL) if (def->compression == NULL)
def->compression = pstrdup(compression); def->compression = pstrdup(compression);
@ -13947,7 +13947,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock
/* New owner must have CREATE privilege on namespace */ /* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId, aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId,
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceOid)); get_namespace_name(namespaceOid));
@ -14377,7 +14377,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
if (check_option) if (check_option)
{ {
const char *view_updatable_error = const char *view_updatable_error =
view_query_is_auto_updatable(view_query, true); view_query_is_auto_updatable(view_query, true);
if (view_updatable_error) if (view_updatable_error)
ereport(ERROR, ereport(ERROR,
@ -14656,7 +14656,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
AclResult aclresult; AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(), aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE, aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(new_tablespaceoid)); get_tablespace_name(new_tablespaceoid));
@ -17134,7 +17134,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
if (IsA(stmt, RenameStmt)) if (IsA(stmt, RenameStmt))
{ {
aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace, aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace,
GetUserId(), ACL_CREATE); GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(classform->relnamespace)); get_namespace_name(classform->relnamespace));

View File

@ -1278,7 +1278,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
/* Check permissions, similarly complaining only if interactive */ /* Check permissions, similarly complaining only if interactive */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
{ {
if (source >= PGC_S_INTERACTIVE) if (source >= PGC_S_INTERACTIVE)
@ -1408,7 +1408,7 @@ PrepareTempTablespaces(void)
/* Check permissions similarly */ /* Check permissions similarly */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
continue; continue;

View File

@ -734,7 +734,7 @@ DefineDomain(CreateDomainStmt *stmt)
/* Check we have creation rights in target namespace */ /* Check we have creation rights in target namespace */
aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(), aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(),
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(domainNamespace)); get_namespace_name(domainNamespace));
@ -3743,8 +3743,8 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
/* New owner must have CREATE privilege on namespace */ /* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace, aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace,
newOwnerId, newOwnerId,
ACL_CREATE); ACL_CREATE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA, aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(typTup->typnamespace)); get_namespace_name(typTup->typnamespace));

View File

@ -86,7 +86,7 @@ typedef struct
int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256; int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256;
char *createrole_self_grant = ""; char *createrole_self_grant = "";
bool createrole_self_grant_enabled = false; bool createrole_self_grant_enabled = false;
GrantRoleOptions createrole_self_grant_options; GrantRoleOptions createrole_self_grant_options;
/* Hook to check passwords in CreateRole() and AlterRole() */ /* Hook to check passwords in CreateRole() and AlterRole() */
check_password_hook_type check_password_hook = NULL; check_password_hook_type check_password_hook = NULL;
@ -169,7 +169,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
DefElem *dadminmembers = NULL; DefElem *dadminmembers = NULL;
DefElem *dvalidUntil = NULL; DefElem *dvalidUntil = NULL;
DefElem *dbypassRLS = NULL; DefElem *dbypassRLS = NULL;
GrantRoleOptions popt; GrantRoleOptions popt;
/* The defaults can vary depending on the original statement type */ /* The defaults can vary depending on the original statement type */
switch (stmt->stmt_type) switch (stmt->stmt_type)
@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
* *
* The grantor of record for this implicit grant is the bootstrap * The grantor of record for this implicit grant is the bootstrap
* superuser, which means that the CREATEROLE user cannot revoke the * superuser, which means that the CREATEROLE user cannot revoke the
* grant. They can however grant the created role back to themselves * grant. They can however grant the created role back to themselves with
* with different options, since they enjoy ADMIN OPTION on it. * different options, since they enjoy ADMIN OPTION on it.
*/ */
if (!superuser()) if (!superuser())
{ {
@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
BOOTSTRAP_SUPERUSERID, &poptself); BOOTSTRAP_SUPERUSERID, &poptself);
/* /*
* We must make the implicit grant visible to the code below, else * We must make the implicit grant visible to the code below, else the
* the additional grants will fail. * additional grants will fail.
*/ */
CommandCounterIncrement(); CommandCounterIncrement();
@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
* Add the specified members to this new role. adminmembers get the admin * Add the specified members to this new role. adminmembers get the admin
* option, rolemembers don't. * option, rolemembers don't.
* *
* NB: No permissions check is required here. If you have enough rights * NB: No permissions check is required here. If you have enough rights to
* to create a role, you can add any members you like. * create a role, you can add any members you like.
*/ */
AddRoleMems(currentUserId, stmt->role, roleid, AddRoleMems(currentUserId, stmt->role, roleid,
rolemembers, roleSpecsToIds(rolemembers), rolemembers, roleSpecsToIds(rolemembers),
@ -647,7 +647,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
DefElem *dbypassRLS = NULL; DefElem *dbypassRLS = NULL;
Oid roleid; Oid roleid;
Oid currentUserId = GetUserId(); Oid currentUserId = GetUserId();
GrantRoleOptions popt; GrantRoleOptions popt;
check_rolespec_name(stmt->role, check_rolespec_name(stmt->role,
_("Cannot alter reserved roles.")); _("Cannot alter reserved roles."));
@ -862,7 +862,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
*/ */
if (dissuper) if (dissuper)
{ {
bool should_be_super = boolVal(dissuper->arg); bool should_be_super = boolVal(dissuper->arg);
if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID) if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID)
ereport(ERROR, ereport(ERROR,
@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
shdepLockAndCheckObject(AuthIdRelationId, roleid); shdepLockAndCheckObject(AuthIdRelationId, roleid);
/* /*
* To mess with a superuser you gotta be superuser; otherwise you * To mess with a superuser you gotta be superuser; otherwise you need
* need CREATEROLE plus admin option on the target role; unless you're * CREATEROLE plus admin option on the target role; unless you're just
* just trying to change your own settings * trying to change your own settings
*/ */
if (roleform->rolsuper) if (roleform->rolsuper)
{ {
@ -1037,7 +1037,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
else else
{ {
if ((!have_createrole_privilege() || if ((!have_createrole_privilege() ||
!is_admin_of_role(GetUserId(), roleid)) !is_admin_of_role(GetUserId(), roleid))
&& roleid != GetUserId()) && roleid != GetUserId())
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@ -1490,14 +1490,14 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
Oid grantor; Oid grantor;
List *grantee_ids; List *grantee_ids;
ListCell *item; ListCell *item;
GrantRoleOptions popt; GrantRoleOptions popt;
Oid currentUserId = GetUserId(); Oid currentUserId = GetUserId();
/* Parse options list. */ /* Parse options list. */
InitGrantRoleOptions(&popt); InitGrantRoleOptions(&popt);
foreach(item, stmt->opt) foreach(item, stmt->opt)
{ {
DefElem *opt = (DefElem *) lfirst(item); DefElem *opt = (DefElem *) lfirst(item);
char *optval = defGetString(opt); char *optval = defGetString(opt);
if (strcmp(opt->defname, "admin") == 0) if (strcmp(opt->defname, "admin") == 0)
@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
/* /*
* Step through all of the granted roles and add, update, or remove * Step through all of the granted roles and add, update, or remove
* entries in pg_auth_members as appropriate. If stmt->is_grant is true, * entries in pg_auth_members as appropriate. If stmt->is_grant is true,
* we are adding new grants or, if they already exist, updating options * we are adding new grants or, if they already exist, updating options on
* on those grants. If stmt->is_grant is false, we are revoking grants or * those grants. If stmt->is_grant is false, we are revoking grants or
* removing options from them. * removing options from them.
*/ */
foreach(item, stmt->granted_roles) foreach(item, stmt->granted_roles)
@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
ObjectIdGetDatum(grantorId)); ObjectIdGetDatum(grantorId));
/* /*
* If we found a tuple, update it with new option values, unless * If we found a tuple, update it with new option values, unless there
* there are no changes, in which case issue a WARNING. * are no changes, in which case issue a WARNING.
* *
* If we didn't find a tuple, just insert one. * If we didn't find a tuple, just insert one.
*/ */
@ -1932,8 +1932,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
popt->inherit; popt->inherit;
else else
{ {
HeapTuple mrtup; HeapTuple mrtup;
Form_pg_authid mrform; Form_pg_authid mrform;
mrtup = SearchSysCache1(AUTHOID, memberid); mrtup = SearchSysCache1(AUTHOID, memberid);
if (!HeapTupleIsValid(mrtup)) if (!HeapTupleIsValid(mrtup))
@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
/* /*
* If popt.specified == 0, we're revoking the grant entirely; otherwise, * If popt.specified == 0, we're revoking the grant entirely; otherwise,
* we expect just one bit to be set, and we're revoking the corresponding * we expect just one bit to be set, and we're revoking the corresponding
* option. As of this writing, there's no syntax that would allow for * option. As of this writing, there's no syntax that would allow for an
* an attempt to revoke multiple options at once, and the logic below * attempt to revoke multiple options at once, and the logic below
* wouldn't work properly if such syntax were added, so assert that our * wouldn't work properly if such syntax were added, so assert that our
* caller isn't trying to do that. * caller isn't trying to do that.
*/ */
@ -2365,7 +2365,7 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
} }
else else
{ {
bool revoke_admin_option_only; bool revoke_admin_option_only;
/* /*
* Revoking the grant entirely, or ADMIN option on a grant, * Revoking the grant entirely, or ADMIN option on a grant,
@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source)
void void
assign_createrole_self_grant(const char *newval, void *extra) assign_createrole_self_grant(const char *newval, void *extra)
{ {
unsigned options = * (unsigned *) extra; unsigned options = *(unsigned *) extra;
createrole_self_grant_enabled = (options != 0); createrole_self_grant_enabled = (options != 0);
createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN

View File

@ -437,7 +437,7 @@ DefineView(ViewStmt *stmt, const char *queryString,
if (check_option) if (check_option)
{ {
const char *view_updatable_error = const char *view_updatable_error =
view_query_is_auto_updatable(viewParse, true); view_query_is_auto_updatable(viewParse, true);
if (view_updatable_error) if (view_updatable_error)
ereport(ERROR, ereport(ERROR,

View File

@ -1214,8 +1214,8 @@ ExecInitExprRec(Expr *node, ExprState *state,
/* Check permission to call function */ /* Check permission to call function */
aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid, aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid,
GetUserId(), GetUserId(),
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(cmpfuncid)); get_func_name(cmpfuncid));
@ -1224,8 +1224,8 @@ ExecInitExprRec(Expr *node, ExprState *state,
if (OidIsValid(opexpr->hashfuncid)) if (OidIsValid(opexpr->hashfuncid))
{ {
aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid, aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid,
GetUserId(), GetUserId(),
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(opexpr->hashfuncid)); get_func_name(opexpr->hashfuncid));
@ -3613,7 +3613,7 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
* column sorted on. * column sorted on.
*/ */
TargetEntry *source_tle = TargetEntry *source_tle =
(TargetEntry *) linitial(pertrans->aggref->args); (TargetEntry *) linitial(pertrans->aggref->args);
Assert(list_length(pertrans->aggref->args) == 1); Assert(list_length(pertrans->aggref->args) == 1);

View File

@ -1659,7 +1659,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{ {
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerGroup pergroup_allaggs = AggStatePerGroup pergroup_allaggs =
aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL) if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull); EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
@ -1684,7 +1684,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal); Assert(pertrans->transtypeByVal);
@ -1712,7 +1712,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal); Assert(pertrans->transtypeByVal);
@ -1730,7 +1730,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal); Assert(pertrans->transtypeByVal);
@ -1747,7 +1747,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal); Assert(!pertrans->transtypeByVal);
@ -1768,7 +1768,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal); Assert(!pertrans->transtypeByVal);
@ -1785,7 +1785,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
AggState *aggstate = castNode(AggState, state->parent); AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal); Assert(!pertrans->transtypeByVal);

View File

@ -354,8 +354,8 @@ ExecInsertIndexTuples(ResultRelInfo *resultRelInfo,
continue; continue;
/* /*
* Skip processing of non-summarizing indexes if we only * Skip processing of non-summarizing indexes if we only update
* update summarizing indexes * summarizing indexes
*/ */
if (onlySummarizing && !indexInfo->ii_Summarizing) if (onlySummarizing && !indexInfo->ii_Summarizing)
continue; continue;

View File

@ -260,7 +260,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (first_time) if (first_time)
{ {
MemoryContext oldcontext = MemoryContext oldcontext =
MemoryContextSwitchTo(econtext->ecxt_per_query_memory); MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore; rsinfo.setResult = tupstore;
@ -290,7 +290,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (tupdesc == NULL) if (tupdesc == NULL)
{ {
MemoryContext oldcontext = MemoryContext oldcontext =
MemoryContextSwitchTo(econtext->ecxt_per_query_memory); MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/* /*
* This is the first non-NULL result from the * This is the first non-NULL result from the
@ -395,7 +395,7 @@ no_function_result:
if (rsinfo.setResult == NULL) if (rsinfo.setResult == NULL)
{ {
MemoryContext oldcontext = MemoryContext oldcontext =
MemoryContextSwitchTo(econtext->ecxt_per_query_memory); MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore; rsinfo.setResult = tupstore;

View File

@ -3690,7 +3690,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/* Check permission to call aggregate function */ /* Check permission to call aggregate function */
aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(), aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_AGGREGATE, aclcheck_error(aclresult, OBJECT_AGGREGATE,
get_func_name(aggref->aggfnoid)); get_func_name(aggref->aggfnoid));
@ -3757,7 +3757,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (OidIsValid(finalfn_oid)) if (OidIsValid(finalfn_oid))
{ {
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid)); get_func_name(finalfn_oid));
@ -3766,7 +3766,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (OidIsValid(serialfn_oid)) if (OidIsValid(serialfn_oid))
{ {
aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner, aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(serialfn_oid)); get_func_name(serialfn_oid));
@ -3775,7 +3775,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (OidIsValid(deserialfn_oid)) if (OidIsValid(deserialfn_oid))
{ {
aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner, aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(deserialfn_oid)); get_func_name(deserialfn_oid));

View File

@ -1339,7 +1339,7 @@ ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
else else
{ {
size_t tuple_size = size_t tuple_size =
MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
/* It belongs in a later batch. */ /* It belongs in a later batch. */
hashtable->batches[batchno].estimated_size += tuple_size; hashtable->batches[batchno].estimated_size += tuple_size;
@ -1381,7 +1381,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable)
for (i = 1; i < old_nbatch; ++i) for (i = 1; i < old_nbatch; ++i)
{ {
ParallelHashJoinBatch *shared = ParallelHashJoinBatch *shared =
NthParallelHashJoinBatch(old_batches, i); NthParallelHashJoinBatch(old_batches, i);
old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared), old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
ParallelWorkerNumber + 1, ParallelWorkerNumber + 1,
@ -3337,7 +3337,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
while (DsaPointerIsValid(batch->chunks)) while (DsaPointerIsValid(batch->chunks))
{ {
HashMemoryChunk chunk = HashMemoryChunk chunk =
dsa_get_address(hashtable->area, batch->chunks); dsa_get_address(hashtable->area, batch->chunks);
dsa_pointer next = chunk->next.shared; dsa_pointer next = chunk->next.shared;
dsa_free(hashtable->area, batch->chunks); dsa_free(hashtable->area, batch->chunks);

View File

@ -1216,7 +1216,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
{ {
SharedTuplestoreAccessor *inner_tuples; SharedTuplestoreAccessor *inner_tuples;
Barrier *batch_barrier = Barrier *batch_barrier =
&hashtable->batches[batchno].shared->batch_barrier; &hashtable->batches[batchno].shared->batch_barrier;
switch (BarrierAttach(batch_barrier)) switch (BarrierAttach(batch_barrier))
{ {
@ -1330,22 +1330,22 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
BufFile *file = *fileptr; BufFile *file = *fileptr;
/* /*
* The batch file is lazily created. If this is the first tuple * The batch file is lazily created. If this is the first tuple written to
* written to this batch, the batch file is created and its buffer is * this batch, the batch file is created and its buffer is allocated in
* allocated in the spillCxt context, NOT in the batchCxt. * the spillCxt context, NOT in the batchCxt.
* *
* During the build phase, buffered files are created for inner * During the build phase, buffered files are created for inner batches.
* batches. Each batch's buffered file is closed (and its buffer freed) * Each batch's buffered file is closed (and its buffer freed) after the
* after the batch is loaded into memory during the outer side scan. * batch is loaded into memory during the outer side scan. Therefore, it
* Therefore, it is necessary to allocate the batch file buffer in a * is necessary to allocate the batch file buffer in a memory context
* memory context which outlives the batch itself. * which outlives the batch itself.
* *
* Also, we use spillCxt instead of hashCxt for a better accounting of * Also, we use spillCxt instead of hashCxt for a better accounting of the
* the spilling memory consumption. * spilling memory consumption.
*/ */
if (file == NULL) if (file == NULL)
{ {
MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt); MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
file = BufFileCreateTemp(false); file = BufFileCreateTemp(false);
*fileptr = file; *fileptr = file;
@ -1622,7 +1622,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
{ {
int plan_node_id = state->js.ps.plan->plan_node_id; int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate = ParallelHashJoinState *pstate =
shm_toc_lookup(pcxt->toc, plan_node_id, false); shm_toc_lookup(pcxt->toc, plan_node_id, false);
/* /*
* It would be possible to reuse the shared hash table in single-batch * It would be possible to reuse the shared hash table in single-batch
@ -1657,7 +1657,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state,
HashState *hashNode; HashState *hashNode;
int plan_node_id = state->js.ps.plan->plan_node_id; int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate = ParallelHashJoinState *pstate =
shm_toc_lookup(pwcxt->toc, plan_node_id, false); shm_toc_lookup(pwcxt->toc, plan_node_id, false);
/* Attach to the space for shared temporary files. */ /* Attach to the space for shared temporary files. */
SharedFileSetAttach(&pstate->fileset, pwcxt->seg); SharedFileSetAttach(&pstate->fileset, pwcxt->seg);

View File

@ -1007,9 +1007,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
if (incrsortstate->ss.ps.instrument != NULL) if (incrsortstate->ss.ps.instrument != NULL)
{ {
IncrementalSortGroupInfo *fullsortGroupInfo = IncrementalSortGroupInfo *fullsortGroupInfo =
&incrsortstate->incsort_info.fullsortGroupInfo; &incrsortstate->incsort_info.fullsortGroupInfo;
IncrementalSortGroupInfo *prefixsortGroupInfo = IncrementalSortGroupInfo *prefixsortGroupInfo =
&incrsortstate->incsort_info.prefixsortGroupInfo; &incrsortstate->incsort_info.prefixsortGroupInfo;
fullsortGroupInfo->groupCount = 0; fullsortGroupInfo->groupCount = 0;
fullsortGroupInfo->maxDiskSpaceUsed = 0; fullsortGroupInfo->maxDiskSpaceUsed = 0;

View File

@ -111,7 +111,7 @@ typedef struct UpdateContext
{ {
bool updated; /* did UPDATE actually occur? */ bool updated; /* did UPDATE actually occur? */
bool crossPartUpdate; /* was it a cross-partition update? */ bool crossPartUpdate; /* was it a cross-partition update? */
TU_UpdateIndexes updateIndexes; /* Which index updates are required? */ TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
/* /*
* Lock mode to acquire on the latest tuple version before performing * Lock mode to acquire on the latest tuple version before performing
@ -881,7 +881,7 @@ ExecInsert(ModifyTableContext *context,
{ {
TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
TupleDesc plan_tdesc = TupleDesc plan_tdesc =
CreateTupleDescCopy(planSlot->tts_tupleDescriptor); CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
MakeSingleTupleTableSlot(tdesc, slot->tts_ops); MakeSingleTupleTableSlot(tdesc, slot->tts_ops);

View File

@ -352,7 +352,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
int colno; int colno;
Datum value; Datum value;
int ordinalitycol = int ordinalitycol =
((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
/* /*
* Install the document as a possibly-toasted Datum into the tablefunc * Install the document as a possibly-toasted Datum into the tablefunc

View File

@ -2582,7 +2582,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
/* Check permission to call window function */ /* Check permission to call window function */
aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(), aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(),
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(wfunc->winfnoid)); get_func_name(wfunc->winfnoid));
@ -2821,7 +2821,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
if (!OidIsValid(aggform->aggminvtransfn)) if (!OidIsValid(aggform->aggminvtransfn))
use_ma_code = false; /* sine qua non */ use_ma_code = false; /* sine qua non */
else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY && else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY &&
aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
use_ma_code = true; /* decision forced by safety */ use_ma_code = true; /* decision forced by safety */
else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
use_ma_code = false; /* non-moving frame head */ use_ma_code = false; /* non-moving frame head */
@ -2871,7 +2871,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
ReleaseSysCache(procTuple); ReleaseSysCache(procTuple);
aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner,
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(transfn_oid)); get_func_name(transfn_oid));
@ -2880,7 +2880,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
if (OidIsValid(invtransfn_oid)) if (OidIsValid(invtransfn_oid))
{ {
aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner, aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner,
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(invtransfn_oid)); get_func_name(invtransfn_oid));
@ -2890,7 +2890,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
if (OidIsValid(finalfn_oid)) if (OidIsValid(finalfn_oid))
{ {
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
ACL_EXECUTE); ACL_EXECUTE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION, aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid)); get_func_name(finalfn_oid));

View File

@ -3345,7 +3345,7 @@ SPI_register_trigger_data(TriggerData *tdata)
if (tdata->tg_newtable) if (tdata->tg_newtable)
{ {
EphemeralNamedRelation enr = EphemeralNamedRelation enr =
palloc(sizeof(EphemeralNamedRelationData)); palloc(sizeof(EphemeralNamedRelationData));
int rc; int rc;
enr->md.name = tdata->tg_trigger->tgnewtable; enr->md.name = tdata->tg_trigger->tgnewtable;
@ -3362,7 +3362,7 @@ SPI_register_trigger_data(TriggerData *tdata)
if (tdata->tg_oldtable) if (tdata->tg_oldtable)
{ {
EphemeralNamedRelation enr = EphemeralNamedRelation enr =
palloc(sizeof(EphemeralNamedRelationData)); palloc(sizeof(EphemeralNamedRelationData));
int rc; int rc;
enr->md.name = tdata->tg_trigger->tgoldtable; enr->md.name = tdata->tg_trigger->tgoldtable;

View File

@ -799,9 +799,9 @@ llvm_session_initialize(void)
LLVMInitializeNativeAsmParser(); LLVMInitializeNativeAsmParser();
/* /*
* When targeting an LLVM version with opaque pointers enabled by * When targeting an LLVM version with opaque pointers enabled by default,
* default, turn them off for the context we build our code in. We don't * turn them off for the context we build our code in. We don't need to
* need to do so for other contexts (e.g. llvm_ts_context). Once the IR is * do so for other contexts (e.g. llvm_ts_context). Once the IR is
* generated, it carries the necessary information. * generated, it carries the necessary information.
*/ */
#if LLVM_VERSION_MAJOR > 14 #if LLVM_VERSION_MAJOR > 14
@ -1118,7 +1118,7 @@ llvm_resolve_symbol(const char *symname, void *ctx)
static LLVMErrorRef static LLVMErrorRef
llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx, llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind, LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags, LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
LLVMOrcCLookupSet LookupSet, size_t LookupSetSize) LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
{ {
@ -1175,7 +1175,7 @@ static LLVMOrcObjectLayerRef
llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple) llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple)
{ {
LLVMOrcObjectLayerRef objlayer = LLVMOrcObjectLayerRef objlayer =
LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
#if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER #if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER
if (jit_debugging_support) if (jit_debugging_support)

View File

@ -650,7 +650,7 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc,
{ {
LLVMValueRef v_tmp_loaddata; LLVMValueRef v_tmp_loaddata;
LLVMTypeRef vartypep = LLVMTypeRef vartypep =
LLVMPointerType(LLVMIntType(att->attlen * 8), 0); LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
v_tmp_loaddata = v_tmp_loaddata =
LLVMBuildPointerCast(b, v_attdatap, vartypep, ""); LLVMBuildPointerCast(b, v_attdatap, vartypep, "");

View File

@ -1047,7 +1047,7 @@ llvm_compile_expr(ExprState *state)
else else
{ {
LLVMValueRef v_value = LLVMValueRef v_value =
LLVMBuildLoad(b, v_resvaluep, ""); LLVMBuildLoad(b, v_resvaluep, "");
v_value = LLVMBuildZExt(b, v_value = LLVMBuildZExt(b,
LLVMBuildICmp(b, LLVMIntEQ, LLVMBuildICmp(b, LLVMIntEQ,
@ -2127,8 +2127,7 @@ llvm_compile_expr(ExprState *state)
/* /*
* pergroup = &aggstate->all_pergroups * pergroup = &aggstate->all_pergroups
* [op->d.agg_trans.setoff] * [op->d.agg_trans.setoff] [op->d.agg_trans.transno];
* [op->d.agg_trans.transno];
*/ */
v_allpergroupsp = v_allpergroupsp =
l_load_struct_gep(b, v_aggstatep, l_load_struct_gep(b, v_aggstatep,

View File

@ -527,8 +527,8 @@ secure_open_gssapi(Port *port)
/* /*
* Use the configured keytab, if there is one. As we now require MIT * Use the configured keytab, if there is one. As we now require MIT
* Kerberos, we might consider using the credential store extensions in the * Kerberos, we might consider using the credential store extensions in
* future instead of the environment variable. * the future instead of the environment variable.
*/ */
if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0') if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
{ {

View File

@ -1104,8 +1104,8 @@ prepare_cert_name(char *name)
if (namelen > MAXLEN) if (namelen > MAXLEN)
{ {
/* /*
* Keep the end of the name, not the beginning, since the most specific * Keep the end of the name, not the beginning, since the most
* field is likely to give users the most information. * specific field is likely to give users the most information.
*/ */
truncated = name + namelen - MAXLEN; truncated = name + namelen - MAXLEN;
truncated[0] = truncated[1] = truncated[2] = '.'; truncated[0] = truncated[1] = truncated[2] = '.';
@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx)
/* /*
* Get the Subject and Issuer for logging, but don't let maliciously * Get the Subject and Issuer for logging, but don't let maliciously
* huge certs flood the logs, and don't reflect non-ASCII bytes into it * huge certs flood the logs, and don't reflect non-ASCII bytes into
* either. * it either.
*/ */
subject = X509_NAME_to_cstring(X509_get_subject_name(cert)); subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
sub_prepared = prepare_cert_name(subject); sub_prepared = prepare_cert_name(subject);

View File

@ -2693,8 +2693,9 @@ load_hba(void)
if (!ok) if (!ok)
{ {
/* /*
* File contained one or more errors, so bail out. MemoryContextDelete * File contained one or more errors, so bail out.
* is enough to clean up everything, including regexes. * MemoryContextDelete is enough to clean up everything, including
* regexes.
*/ */
MemoryContextDelete(hbacxt); MemoryContextDelete(hbacxt);
return false; return false;
@ -3056,8 +3057,9 @@ load_ident(void)
if (!ok) if (!ok)
{ {
/* /*
* File contained one or more errors, so bail out. MemoryContextDelete * File contained one or more errors, so bail out.
* is enough to clean up everything, including regexes. * MemoryContextDelete is enough to clean up everything, including
* regexes.
*/ */
MemoryContextDelete(ident_context); MemoryContextDelete(ident_context);
return false; return false;

View File

@ -106,7 +106,7 @@ my @nodetag_only_files = qw(
# In HEAD, these variables should be left undef, since we don't promise # In HEAD, these variables should be left undef, since we don't promise
# ABI stability during development. # ABI stability during development.
my $last_nodetag = undef; my $last_nodetag = undef;
my $last_nodetag_no = undef; my $last_nodetag_no = undef;
# output file names # output file names
@ -161,9 +161,9 @@ push @node_types, qw(List);
# (Ideally we'd mark List as "special copy/equal" not "no copy/equal". # (Ideally we'd mark List as "special copy/equal" not "no copy/equal".
# But until there's other use-cases for that, just hot-wire the tests # But until there's other use-cases for that, just hot-wire the tests
# that would need to distinguish.) # that would need to distinguish.)
push @no_copy, qw(List); push @no_copy, qw(List);
push @no_equal, qw(List); push @no_equal, qw(List);
push @no_query_jumble, qw(List); push @no_query_jumble, qw(List);
push @special_read_write, qw(List); push @special_read_write, qw(List);
# Nodes with custom copy/equal implementations are skipped from # Nodes with custom copy/equal implementations are skipped from
@ -230,7 +230,7 @@ foreach my $infile (@ARGV)
} }
$file_content .= $raw_file_content; $file_content .= $raw_file_content;
my $lineno = 0; my $lineno = 0;
my $prevline = ''; my $prevline = '';
foreach my $line (split /\n/, $file_content) foreach my $line (split /\n/, $file_content)
{ {
@ -247,7 +247,7 @@ foreach my $infile (@ARGV)
if ($line =~ /;$/) if ($line =~ /;$/)
{ {
# found the end, re-attach any previous line(s) # found the end, re-attach any previous line(s)
$line = $prevline . $line; $line = $prevline . $line;
$prevline = ''; $prevline = '';
} }
elsif ($prevline eq '' elsif ($prevline eq ''
@ -272,7 +272,7 @@ foreach my $infile (@ARGV)
if ($subline == 1) if ($subline == 1)
{ {
$is_node_struct = 0; $is_node_struct = 0;
$supertype = undef; $supertype = undef;
next if $line eq '{'; next if $line eq '{';
die "$infile:$lineno: expected opening brace\n"; die "$infile:$lineno: expected opening brace\n";
} }
@ -280,7 +280,7 @@ foreach my $infile (@ARGV)
elsif ($subline == 2 elsif ($subline == 2
&& $line =~ /^\s*pg_node_attr\(([\w(), ]*)\)$/) && $line =~ /^\s*pg_node_attr\(([\w(), ]*)\)$/)
{ {
$node_attrs = $1; $node_attrs = $1;
$node_attrs_lineno = $lineno; $node_attrs_lineno = $lineno;
# hack: don't count the line # hack: don't count the line
$subline--; $subline--;
@ -296,8 +296,8 @@ foreach my $infile (@ARGV)
} }
elsif ($line =~ /\s*(\w+)\s+(\w+);/ and elem $1, @node_types) elsif ($line =~ /\s*(\w+)\s+(\w+);/ and elem $1, @node_types)
{ {
$is_node_struct = 1; $is_node_struct = 1;
$supertype = $1; $supertype = $1;
$supertype_field = $2; $supertype_field = $2;
next; next;
} }
@ -339,7 +339,7 @@ foreach my $infile (@ARGV)
} }
elsif ($attr eq 'no_copy_equal') elsif ($attr eq 'no_copy_equal')
{ {
push @no_copy, $in_struct; push @no_copy, $in_struct;
push @no_equal, $in_struct; push @no_equal, $in_struct;
} }
elsif ($attr eq 'no_query_jumble') elsif ($attr eq 'no_query_jumble')
@ -373,7 +373,7 @@ foreach my $infile (@ARGV)
push @node_types, $in_struct; push @node_types, $in_struct;
# field names, types, attributes # field names, types, attributes
my @f = @my_fields; my @f = @my_fields;
my %ft = %my_field_types; my %ft = %my_field_types;
my %fa = %my_field_attrs; my %fa = %my_field_attrs;
@ -405,7 +405,7 @@ foreach my $infile (@ARGV)
unshift @f, @superfields; unshift @f, @superfields;
} }
# save in global info structure # save in global info structure
$node_type_info{$in_struct}->{fields} = \@f; $node_type_info{$in_struct}->{fields} = \@f;
$node_type_info{$in_struct}->{field_types} = \%ft; $node_type_info{$in_struct}->{field_types} = \%ft;
$node_type_info{$in_struct}->{field_attrs} = \%fa; $node_type_info{$in_struct}->{field_attrs} = \%fa;
@ -428,9 +428,9 @@ foreach my $infile (@ARGV)
} }
# start new cycle # start new cycle
$in_struct = undef; $in_struct = undef;
$node_attrs = ''; $node_attrs = '';
@my_fields = (); @my_fields = ();
%my_field_types = (); %my_field_types = ();
%my_field_attrs = (); %my_field_attrs = ();
} }
@ -441,10 +441,10 @@ foreach my $infile (@ARGV)
{ {
if ($is_node_struct) if ($is_node_struct)
{ {
my $type = $1; my $type = $1;
my $name = $2; my $name = $2;
my $array_size = $3; my $array_size = $3;
my $attrs = $4; my $attrs = $4;
# strip "const" # strip "const"
$type =~ s/^const\s*//; $type =~ s/^const\s*//;
@ -499,9 +499,9 @@ foreach my $infile (@ARGV)
{ {
if ($is_node_struct) if ($is_node_struct)
{ {
my $type = $1; my $type = $1;
my $name = $2; my $name = $2;
my $args = $3; my $args = $3;
my $attrs = $4; my $attrs = $4;
my @attrs; my @attrs;
@ -540,20 +540,20 @@ foreach my $infile (@ARGV)
if ($line =~ /^(?:typedef )?struct (\w+)$/ && $1 ne 'Node') if ($line =~ /^(?:typedef )?struct (\w+)$/ && $1 ne 'Node')
{ {
$in_struct = $1; $in_struct = $1;
$subline = 0; $subline = 0;
} }
# one node type typedef'ed directly from another # one node type typedef'ed directly from another
elsif ($line =~ /^typedef (\w+) (\w+);$/ and elem $1, @node_types) elsif ($line =~ /^typedef (\w+) (\w+);$/ and elem $1, @node_types)
{ {
my $alias_of = $1; my $alias_of = $1;
my $n = $2; my $n = $2;
# copy everything over # copy everything over
push @node_types, $n; push @node_types, $n;
my @f = @{ $node_type_info{$alias_of}->{fields} }; my @f = @{ $node_type_info{$alias_of}->{fields} };
my %ft = %{ $node_type_info{$alias_of}->{field_types} }; my %ft = %{ $node_type_info{$alias_of}->{field_types} };
my %fa = %{ $node_type_info{$alias_of}->{field_attrs} }; my %fa = %{ $node_type_info{$alias_of}->{field_attrs} };
$node_type_info{$n}->{fields} = \@f; $node_type_info{$n}->{fields} = \@f;
$node_type_info{$n}->{field_types} = \%ft; $node_type_info{$n}->{field_types} = \%ft;
$node_type_info{$n}->{field_attrs} = \%fa; $node_type_info{$n}->{field_attrs} = \%fa;
} }
@ -608,7 +608,7 @@ open my $nt, '>', "$output_path/nodetags.h$tmpext"
printf $nt $header_comment, 'nodetags.h'; printf $nt $header_comment, 'nodetags.h';
my $tagno = 0; my $tagno = 0;
my $last_tag = undef; my $last_tag = undef;
foreach my $n (@node_types, @extra_tags) foreach my $n (@node_types, @extra_tags)
{ {
@ -669,7 +669,7 @@ foreach my $n (@node_types)
{ {
next if elem $n, @abstract_types; next if elem $n, @abstract_types;
next if elem $n, @nodetag_only; next if elem $n, @nodetag_only;
my $struct_no_copy = (elem $n, @no_copy); my $struct_no_copy = (elem $n, @no_copy);
my $struct_no_equal = (elem $n, @no_equal); my $struct_no_equal = (elem $n, @no_equal);
next if $struct_no_copy && $struct_no_equal; next if $struct_no_copy && $struct_no_equal;
@ -705,15 +705,15 @@ _equal${n}(const $n *a, const $n *b)
# print instructions for each field # print instructions for each field
foreach my $f (@{ $node_type_info{$n}->{fields} }) foreach my $f (@{ $node_type_info{$n}->{fields} })
{ {
my $t = $node_type_info{$n}->{field_types}{$f}; my $t = $node_type_info{$n}->{field_types}{$f};
my @a = @{ $node_type_info{$n}->{field_attrs}{$f} }; my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
my $copy_ignore = $struct_no_copy; my $copy_ignore = $struct_no_copy;
my $equal_ignore = $struct_no_equal; my $equal_ignore = $struct_no_equal;
# extract per-field attributes # extract per-field attributes
my $array_size_field; my $array_size_field;
my $copy_as_field; my $copy_as_field;
my $copy_as_scalar = 0; my $copy_as_scalar = 0;
my $equal_as_scalar = 0; my $equal_as_scalar = 0;
foreach my $a (@a) foreach my $a (@a)
{ {
@ -768,7 +768,7 @@ _equal${n}(const $n *a, const $n *b)
# select instructions by field type # select instructions by field type
if ($t eq 'char*') if ($t eq 'char*')
{ {
print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore; print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_STRING_FIELD($f);\n" unless $equal_ignore; print $eff "\tCOMPARE_STRING_FIELD($f);\n" unless $equal_ignore;
} }
elsif ($t eq 'Bitmapset*' || $t eq 'Relids') elsif ($t eq 'Bitmapset*' || $t eq 'Relids')
@ -779,7 +779,7 @@ _equal${n}(const $n *a, const $n *b)
} }
elsif ($t eq 'int' && $f =~ 'location$') elsif ($t eq 'int' && $f =~ 'location$')
{ {
print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore; print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_LOCATION_FIELD($f);\n" unless $equal_ignore; print $eff "\tCOMPARE_LOCATION_FIELD($f);\n" unless $equal_ignore;
} }
elsif (elem $t, @scalar_types or elem $t, @enum_types) elsif (elem $t, @scalar_types or elem $t, @enum_types)
@ -828,7 +828,7 @@ _equal${n}(const $n *a, const $n *b)
elsif ($t eq 'function pointer') elsif ($t eq 'function pointer')
{ {
# we can copy and compare as a scalar # we can copy and compare as a scalar
print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore; print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore; print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
} }
# node type # node type
@ -846,13 +846,13 @@ _equal${n}(const $n *a, const $n *b)
and $1 ne 'List' and $1 ne 'List'
and !$equal_ignore; and !$equal_ignore;
print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore; print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_NODE_FIELD($f);\n" unless $equal_ignore; print $eff "\tCOMPARE_NODE_FIELD($f);\n" unless $equal_ignore;
} }
# array (inline) # array (inline)
elsif ($t =~ /^\w+\[\w+\]$/) elsif ($t =~ /^\w+\[\w+\]$/)
{ {
print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore; print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_ARRAY_FIELD($f);\n" unless $equal_ignore; print $eff "\tCOMPARE_ARRAY_FIELD($f);\n" unless $equal_ignore;
} }
elsif ($t eq 'struct CustomPathMethods*' elsif ($t eq 'struct CustomPathMethods*'
@ -861,7 +861,7 @@ _equal${n}(const $n *a, const $n *b)
# Fields of these types are required to be a pointer to a # Fields of these types are required to be a pointer to a
# static table of callback functions. So we don't copy # static table of callback functions. So we don't copy
# the table itself, just reference the original one. # the table itself, just reference the original one.
print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore; print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore; print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
} }
else else
@ -1073,7 +1073,7 @@ _read${n}(void)
{ {
print $off "\tWRITE_FLOAT_FIELD($f.startup);\n"; print $off "\tWRITE_FLOAT_FIELD($f.startup);\n";
print $off "\tWRITE_FLOAT_FIELD($f.per_tuple);\n"; print $off "\tWRITE_FLOAT_FIELD($f.per_tuple);\n";
print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read; print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read;
print $rff "\tREAD_FLOAT_FIELD($f.per_tuple);\n" unless $no_read; print $rff "\tREAD_FLOAT_FIELD($f.per_tuple);\n" unless $no_read;
} }
elsif ($t eq 'Selectivity') elsif ($t eq 'Selectivity')
@ -1278,8 +1278,8 @@ _jumble${n}(JumbleState *jstate, Node *node)
# print instructions for each field # print instructions for each field
foreach my $f (@{ $node_type_info{$n}->{fields} }) foreach my $f (@{ $node_type_info{$n}->{fields} })
{ {
my $t = $node_type_info{$n}->{field_types}{$f}; my $t = $node_type_info{$n}->{field_types}{$f};
my @a = @{ $node_type_info{$n}->{field_attrs}{$f} }; my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
my $query_jumble_ignore = $struct_no_query_jumble; my $query_jumble_ignore = $struct_no_query_jumble;
my $query_jumble_location = 0; my $query_jumble_location = 0;

View File

@ -2011,7 +2011,7 @@ cost_incremental_sort(Path *path,
{ {
PathKey *key = (PathKey *) lfirst(l); PathKey *key = (PathKey *) lfirst(l);
EquivalenceMember *member = (EquivalenceMember *) EquivalenceMember *member = (EquivalenceMember *)
linitial(key->pk_eclass->ec_members); linitial(key->pk_eclass->ec_members);
/* /*
* Check if the expression contains Var with "varno 0" so that we * Check if the expression contains Var with "varno 0" so that we

View File

@ -370,7 +370,7 @@ adjust_appendrel_attrs_mutator(Node *node,
if (leaf_relid) if (leaf_relid)
{ {
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
list_nth(context->root->row_identity_vars, var->varattno - 1); list_nth(context->root->row_identity_vars, var->varattno - 1);
if (bms_is_member(leaf_relid, ridinfo->rowidrels)) if (bms_is_member(leaf_relid, ridinfo->rowidrels))
{ {

View File

@ -1158,7 +1158,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
{ {
/* UPDATE/DELETE/MERGE row identity vars are always needed */ /* UPDATE/DELETE/MERGE row identity vars are always needed */
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
list_nth(root->row_identity_vars, var->varattno - 1); list_nth(root->row_identity_vars, var->varattno - 1);
/* Update reltarget width estimate from RowIdentityVarInfo */ /* Update reltarget width estimate from RowIdentityVarInfo */
joinrel->reltarget->width += ridinfo->rowidwidth; joinrel->reltarget->width += ridinfo->rowidwidth;

View File

@ -9,7 +9,7 @@
use strict; use strict;
use warnings; use warnings;
my $gram_filename = $ARGV[0]; my $gram_filename = $ARGV[0];
my $kwlist_filename = $ARGV[1]; my $kwlist_filename = $ARGV[1];
my $errors = 0; my $errors = 0;
@ -47,10 +47,10 @@ $, = ' '; # set output field separator
$\ = "\n"; # set output record separator $\ = "\n"; # set output record separator
my %keyword_categories; my %keyword_categories;
$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD'; $keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD'; $keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
$keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD'; $keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD'; $keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename"); open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
@ -183,7 +183,7 @@ kwlist_line: while (<$kwlist>)
if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/) if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/)
{ {
my ($kwstring) = $1; my ($kwstring) = $1;
my ($kwname) = $2; my ($kwname) = $2;
my ($kwcat_id) = $3; my ($kwcat_id) = $3;
my ($collabel) = $4; my ($collabel) = $4;

View File

@ -3357,7 +3357,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format,
if (format->format_type == JS_FORMAT_JSON) if (format->format_type == JS_FORMAT_JSON)
{ {
JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ? JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
format->encoding : JS_ENC_UTF8; format->encoding : JS_ENC_UTF8;
if (targettype != BYTEAOID && if (targettype != BYTEAOID &&
format->encoding != JS_ENC_DEFAULT) format->encoding != JS_ENC_DEFAULT)

View File

@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt)
/* /*
* Set up the MERGE target table. The target table is added to the * Set up the MERGE target table. The target table is added to the
* namespace below and to joinlist in transform_MERGE_to_join, so don't * namespace below and to joinlist in transform_MERGE_to_join, so don't do
* do it here. * it here.
*/ */
qry->resultRelation = setTargetTable(pstate, stmt->relation, qry->resultRelation = setTargetTable(pstate, stmt->relation,
stmt->relation->inh, stmt->relation->inh,

View File

@ -993,7 +993,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
{ {
aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(), aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(),
ACL_USAGE); ACL_USAGE);
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TYPE, aclcheck_error(aclresult, OBJECT_TYPE,
RelationGetRelationName(relation)); RelationGetRelationName(relation));
@ -2355,7 +2355,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
* mentioned above. * mentioned above.
*/ */
Datum attoptions = Datum attoptions =
get_attoptions(RelationGetRelid(index_rel), i + 1); get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid, defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam); index_rel->rd_rel->relam);

View File

@ -2340,9 +2340,9 @@ merge_default_partitions(PartitionMap *outer_map,
/* /*
* The default partitions have to be joined with each other, so merge * The default partitions have to be joined with each other, so merge
* them. Note that each of the default partitions isn't merged yet * them. Note that each of the default partitions isn't merged yet
* (see, process_outer_partition()/process_inner_partition()), so * (see, process_outer_partition()/process_inner_partition()), so they
* they should be merged successfully. The merged partition will act * should be merged successfully. The merged partition will act as
* as the default partition of the join relation. * the default partition of the join relation.
*/ */
Assert(outer_merged_index == -1); Assert(outer_merged_index == -1);
Assert(inner_merged_index == -1); Assert(inner_merged_index == -1);
@ -3193,7 +3193,7 @@ check_new_partition_bound(char *relname, Relation parent,
* datums list. * datums list.
*/ */
PartitionRangeDatum *datum = PartitionRangeDatum *datum =
list_nth(spec->upperdatums, abs(cmpval) - 1); list_nth(spec->upperdatums, abs(cmpval) - 1);
/* /*
* The new partition overlaps with the * The new partition overlaps with the

View File

@ -58,8 +58,8 @@ fork_process(void)
/* /*
* We start postmaster children with signals blocked. This allows them to * We start postmaster children with signals blocked. This allows them to
* install their own handlers before unblocking, to avoid races where they * install their own handlers before unblocking, to avoid races where they
* might run the postmaster's handler and miss an important control signal. * might run the postmaster's handler and miss an important control
* With more analysis this could potentially be relaxed. * signal. With more analysis this could potentially be relaxed.
*/ */
sigprocmask(SIG_SETMASK, &BlockSig, &save_mask); sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
result = fork(); result = fork();

View File

@ -759,6 +759,7 @@ lexescape(struct vars *v)
RETV(PLAIN, c); RETV(PLAIN, c);
break; break;
default: default:
/* /*
* Throw an error for unrecognized ASCII alpha escape sequences, * Throw an error for unrecognized ASCII alpha escape sequences,
* which reserves them for future use if needed. * which reserves them for future use if needed.

View File

@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password)
if (must_use_password) if (must_use_password)
{ {
bool uses_password = false; bool uses_password = false;
for (opt = opts; opt->keyword != NULL; ++opt) for (opt = opts; opt->keyword != NULL; ++opt)
{ {

Some files were not shown because too many files have changed in this diff Show More