Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical.  We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop).  We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up.  Going
forward, that should make for fewer random-seeming changes to existing
code.
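
As an illustration of the pg_bsd_indent change (a made-up snippet, not
taken from this diff): when a declaration's initialization expression
wraps, the continuation line used to receive no extra indentation, and
is now indented one tab stop:

    static int
    remaining_ms(int timeout_ms, int elapsed_ms)
    {
        /*
         * Before (pg_bsd_indent 2.1.1) the continuation line carried no
         * extra indentation:
         *
         *     int         remains = timeout_ms -
         *     elapsed_ms;
         *
         * With 2.1.2 it is indented one tab stop:
         */
        int         remains = timeout_ms -
            elapsed_ms;

        return remains;
    }

The perltidy settings change works in the same spirit: it no longer pads
assignments and fat commas with spaces purely to make them line up
vertically, as several of the Perl hunks below show.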

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
Tom Lane 2023-05-19 17:24:48 -04:00
parent df6b19fbbc
commit 0245f8db36
402 changed files with 4756 additions and 4427 deletions

View File

@ -38,30 +38,35 @@ $node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
my $main_h = $node->background_psql('postgres');
$main_h->query_safe(q(
$main_h->query_safe(
q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
my $cic_h = $node->background_psql('postgres');
$cic_h->query_until(qr/start/, q(
$cic_h->query_until(
qr/start/, q(
\echo start
CREATE INDEX CONCURRENTLY idx ON tbl(i);
));
$main_h->query_safe(q(
$main_h->query_safe(
q(
PREPARE TRANSACTION 'a';
));
$main_h->query_safe(q(
$main_h->query_safe(
q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
$node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
$main_h->query_safe(q(
$main_h->query_safe(
q(
PREPARE TRANSACTION 'b';
BEGIN;
INSERT INTO tbl VALUES(0);
@ -69,7 +74,8 @@ INSERT INTO tbl VALUES(0);
$node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
$main_h->query_safe(q(
$main_h->query_safe(
q(
PREPARE TRANSACTION 'c';
COMMIT PREPARED 'c';
));
@ -97,7 +103,8 @@ PREPARE TRANSACTION 'persists_forever';
$node->restart;
my $reindex_h = $node->background_psql('postgres');
$reindex_h->query_until(qr/start/, q(
$reindex_h->query_until(
qr/start/, q(
\echo start
DROP INDEX CONCURRENTLY idx;
CREATE INDEX CONCURRENTLY idx ON tbl(i);

View File

@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS)
/*
* Since we've checked that this redirect points to a line
* pointer between FirstOffsetNumber and maxoff, it should
* now be safe to fetch the referenced line pointer. We expect
* it to be LP_NORMAL; if not, that's corruption.
* pointer between FirstOffsetNumber and maxoff, it should now
* be safe to fetch the referenced line pointer. We expect it
* to be LP_NORMAL; if not, that's corruption.
*/
rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem))
@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS)
{
/*
* We should not have set successor[ctx.offnum] to a value
* other than InvalidOffsetNumber unless that line pointer
* is LP_NORMAL.
* other than InvalidOffsetNumber unless that line pointer is
* LP_NORMAL.
*/
Assert(ItemIdIsNormal(next_lp));
@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
* If the next line pointer is a redirect, or if it's a tuple
* but the XMAX of this tuple doesn't match the XMIN of the next
* If the next line pointer is a redirect, or if it's a tuple but
* the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and
* there is nothing more to do.
*/
@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
* This tuple and the tuple to which it points seem to be part
* of an update chain.
* This tuple and the tuple to which it points seem to be part of
* an update chain.
*/
predecessor[nextoffnum] = ctx.offnum;
@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS)
}
/*
* If the current tuple's xmin is aborted but the successor tuple's
* xmin is in-progress or committed, that's corruption.
* If the current tuple's xmin is aborted but the successor
* tuple's xmin is in-progress or committed, that's corruption.
*/
if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED &&
@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
diff = (int32) (ctx->next_xid - xid);
/*
* In cases of corruption we might see a 32bit xid that is before epoch
* 0. We can't represent that as a 64bit xid, due to 64bit xids being
* In cases of corruption we might see a 32bit xid that is before epoch 0.
* We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd

View File

@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state)
MemoryContext basic_archive_context;
/*
* If we didn't get to storing the pointer to our allocated state, we don't
* have anything to clean up.
* If we didn't get to storing the pointer to our allocated state, we
* don't have anything to clean up.
*/
if (data == NULL)
return;

View File

@ -186,7 +186,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
state->curop - state->op, lenval, flag))
return false;

View File

@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
/*
* Should never get called when the insert is being performed on a table
* that is also among the target relations of an UPDATE operation,
* because postgresBeginForeignInsert() currently rejects such insert
* attempts.
* that is also among the target relations of an UPDATE operation, because
* postgresBeginForeignInsert() currently rejects such insert attempts.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
@ -5173,9 +5172,9 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
&can_tablesample);
/*
* Make sure we're not choosing TABLESAMPLE when the remote relation does
* not support that. But only do this for "auto" - if the user explicitly
* requested BERNOULLI/SYSTEM, it's better to fail.
* Make sure we're not choosing TABLESAMPLE when the remote relation
* does not support that. But only do this for "auto" - if the user
* explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM;
@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
else
{
/*
* All supported sampling methods require sampling rate,
* not target rows directly, so we calculate that using
* the remote reltuples value. That's imperfect, because
* it might be off a good deal, but that's not something
* we can (or should) address here.
* All supported sampling methods require sampling rate, not
* target rows directly, so we calculate that using the remote
* reltuples value. That's imperfect, because it might be off a
* good deal, but that's not something we can (or should) address
* here.
*
* If reltuples is too low (i.e. when table grew), we'll
* end up sampling more rows - but then we'll apply the
* local sampling, so we get the expected sample size.
* This is the same outcome as without remote sampling.
* If reltuples is too low (i.e. when table grew), we'll end up
* sampling more rows - but then we'll apply the local sampling,
* so we get the expected sample size. This is the same outcome as
* without remote sampling.
*
* If reltuples is too high (e.g. after bulk DELETE), we
* will end up sampling too few rows.
* If reltuples is too high (e.g. after bulk DELETE), we will end
* up sampling too few rows.
*
* We can't really do much better here - we could try
* sampling a bit more rows, but we don't know how off
* the reltuples value is so how much is "a bit more"?
* We can't really do much better here - we could try sampling a
* bit more rows, but we don't know how off the reltuples value is
* so how much is "a bit more"?
*
* Furthermore, the targrows value for partitions is
* determined based on table size (relpages), which can
* be off in different ways too. Adjusting the sampling
* rate here might make the issue worse.
* Furthermore, the targrows value for partitions is determined
* based on table size (relpages), which can be off in different
* ways too. Adjusting the sampling rate here might make the issue
* worse.
*/
sample_frac = targrows / reltuples;
/*
* We should never get sampling rate outside the valid range
* (between 0.0 and 1.0), because those cases should be covered
* by the previous branch that sets ANALYZE_SAMPLE_OFF.
* (between 0.0 and 1.0), because those cases should be covered by
* the previous branch that sets ANALYZE_SAMPLE_OFF.
*/
Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
}

View File

@ -700,8 +700,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
}
/*
* If we found a scan key eliminating the range, no need to
* check additional ones.
* If we found a scan key eliminating the range, no need
* to check additional ones.
*/
if (!addrange)
break;
@ -1223,7 +1223,7 @@ brin_build_desc(Relation rel)
* Obtain BrinOpcInfo for each indexed column. While at it, accumulate
* the number of columns stored, since the number is opclass-defined.
*/
opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts);
opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
for (keyno = 0; keyno < tupdesc->natts; keyno++)
{
FmgrInfo *opcInfoFn;
@ -1801,8 +1801,8 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
bval = &dtup->bt_columns[keyno];
/*
* Does the range have actual NULL values? Either of the flags can
* be set, but we ignore the state before adding first row.
* Does the range have actual NULL values? Either of the flags can be
* set, but we ignore the state before adding first row.
*
* We have to remember this, because we'll modify the flags and we
* need to know if the range started as empty.
@ -1842,12 +1842,12 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/*
* If the range was had actual NULL values (i.e. did not start empty),
* make sure we don't forget about the NULL values. Either the allnulls
* flag is still set to true, or (if the opclass cleared it) we need to
* set hasnulls=true.
* make sure we don't forget about the NULL values. Either the
* allnulls flag is still set to true, or (if the opclass cleared it)
* we need to set hasnulls=true.
*
* XXX This can only happen when the opclass modified the tuple, so the
* modified flag should be set.
* XXX This can only happen when the opclass modified the tuple, so
* the modified flag should be set.
*/
if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
{
@ -1859,9 +1859,9 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/*
* After updating summaries for all the keys, mark it as not empty.
*
* If we're actually changing the flag value (i.e. tuple started as empty),
* we should have modified the tuple. So we should not see empty range that
* was not modified.
* If we're actually changing the flag value (i.e. tuple started as
* empty), we should have modified the tuple. So we should not see empty
* range that was not modified.
*/
Assert(!dtup->bt_empty_range || modified);
dtup->bt_empty_range = false;

View File

@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS)
}
else
{
Size bsize, rsize;
Size bsize,
rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior
* must be preserved.
* character in the hash, but it was done before and the behavior must
* be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS)
}
else
{
Size bsize, rsize;
Size bsize,
rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior
* must be preserved.
* character in the hash, but it was done before and the behavior must
* be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));

View File

@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
* Note: heap_update returns the tid (location) of the new tuple in the
* t_self field.
*
* If the update is not HOT, we must update all indexes. If the update
* is HOT, it could be that we updated summarized columns, so we either
* If the update is not HOT, we must update all indexes. If the update is
* HOT, it could be that we updated summarized columns, so we either
* update only summarized indexes, or none at all.
*/
if (result != TM_Ok)

View File

@ -389,6 +389,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
params->truncate != VACOPTVALUE_AUTO);
/*
* While VacuumFailSafeActive is reset to false before calling this, we
* still need to reset it here due to recursive calls.
@ -1813,12 +1814,12 @@ retry:
{
/*
* We have no freeze plans to execute, so there's no added cost
* from following the freeze path. That's why it was chosen.
* This is important in the case where the page only contains
* totally frozen tuples at this point (perhaps only following
* pruning). Such pages can be marked all-frozen in the VM by our
* caller, even though none of its tuples were newly frozen here
* (note that the "no freeze" path never sets pages all-frozen).
* from following the freeze path. That's why it was chosen. This
* is important in the case where the page only contains totally
* frozen tuples at this point (perhaps only following pruning).
* Such pages can be marked all-frozen in the VM by our caller,
* even though none of its tuples were newly frozen here (note
* that the "no freeze" path never sets pages all-frozen).
*
* We never increment the frozen_pages instrumentation counter
* here, since it only counts pages with newly frozen tuples

View File

@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/*
* Serialize the transaction snapshot if the transaction
* isolation level uses a transaction snapshot.
* Serialize the transaction snapshot if the transaction isolation
* level uses a transaction snapshot.
*/
if (IsolationUsesXactSnapshot())
{
@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg)
RestoreClientConnectionInfo(clientconninfospace);
/*
* Initialize SystemUser now that MyClientConnectionInfo is restored.
* Also ensure that auth_method is actually valid, aka authn_id is not NULL.
* Initialize SystemUser now that MyClientConnectionInfo is restored. Also
* ensure that auth_method is actually valid, aka authn_id is not NULL.
*/
if (MyClientConnectionInfo.authn_id)
InitializeSystemUser(MyClientConnectionInfo.authn_id,

View File

@ -3152,10 +3152,9 @@ CommitTransactionCommand(void)
break;
/*
* The user issued a SAVEPOINT inside a transaction block.
* Start a subtransaction. (DefineSavepoint already did
* PushTransaction, so as to have someplace to put the SUBBEGIN
* state.)
* The user issued a SAVEPOINT inside a transaction block. Start a
* subtransaction. (DefineSavepoint already did PushTransaction,
* so as to have someplace to put the SUBBEGIN state.)
*/
case TBLOCK_SUBBEGIN:
StartSubTransaction();

View File

@ -5460,8 +5460,8 @@ StartupXLOG(void)
missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
/*
* Reset ps status display, so as no information related to recovery
* shows up.
* Reset ps status display, so as no information related to recovery shows
* up.
*/
set_ps_display("");
@ -5596,9 +5596,9 @@ StartupXLOG(void)
if (!XLogRecPtrIsInvalid(missingContrecPtr))
{
/*
* We should only have a missingContrecPtr if we're not switching to
* a new timeline. When a timeline switch occurs, WAL is copied from
* the old timeline to the new only up to the end of the last complete
* We should only have a missingContrecPtr if we're not switching to a
* new timeline. When a timeline switch occurs, WAL is copied from the
* old timeline to the new only up to the end of the last complete
* record, so there can't be an incomplete WAL record that we need to
* disregard.
*/

View File

@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
*
* XLogReader machinery is only able to handle records up to a certain
* size (ignoring machine resource limitations), so make sure that we will
* not emit records larger than the sizes advertised to be supported.
* This cap is based on DecodeXLogRecordRequiredSpace().
* not emit records larger than the sizes advertised to be supported. This
* cap is based on DecodeXLogRecordRequiredSpace().
*/
if (total_len >= XLogRecordMaxSize)
ereport(ERROR,

View File

@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename,
*
* There's no guarantee that this will actually
* happen, though: the torn write could take an
* arbitrarily long time to complete. Retrying multiple
* times wouldn't fix this problem, either, though
* it would reduce the chances of it happening in
* practice. The only real fix here seems to be to
* arbitrarily long time to complete. Retrying
* multiple times wouldn't fix this problem, either,
* though it would reduce the chances of it happening
* in practice. The only real fix here seems to be to
* have some kind of interlock that allows us to wait
* until we can be certain that no write to the block
* is in progress. Since we don't have any such thing

View File

@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
/*
* int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned.
@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
/* Data row */
values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
values[1] = Int64GetDatum(tli);
do_tup_output(tstate, values, nulls);

View File

@ -243,8 +243,8 @@ sub ParseHeader
# BKI_LOOKUP implicitly makes an FK reference
push @{ $catalog{foreign_keys} },
{
is_array =>
($atttype eq 'oidvector' || $atttype eq '_oid')
is_array => (
$atttype eq 'oidvector' || $atttype eq '_oid')
? 1
: 0,
is_opt => $column{lookup_opt},

View File

@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
/*
* Check if ACL_MAINTAIN is being checked and, if so, and not already set as
* part of the result, then check if the user is a member of the
* Check if ACL_MAINTAIN is being checked and, if so, and not already set
* as part of the result, then check if the user is a member of the
* pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
* MATERIALIZED VIEW, and REINDEX on all relations.
*/

View File

@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple,
#endif /* USE_ASSERT_CHECKING */
/*
* Skip insertions into non-summarizing indexes if we only need
* to update summarizing indexes.
* Skip insertions into non-summarizing indexes if we only need to
* update summarizing indexes.
*/
if (onlySummarized && !indexInfo->ii_Summarizing)
continue;

View File

@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
/* FALLTHROUGH */
case SHARED_DEPENDENCY_OWNER:
/*
* Save it for deletion below, if it's a local object or a
* role grant. Other shared objects, such as databases,

View File

@ -487,6 +487,7 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
/* retrieve from pg_database */
HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
if (!HeapTupleIsValid(dbtup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@ -507,6 +508,7 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
/* retrieve from pg_collation */
HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
if (!HeapTupleIsValid(colltp))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid,
Oid collid;
/*
* Some systems have locale names that don't consist entirely of
* ASCII letters (such as "bokm&aring;l" or "fran&ccedil;ais").
* This is pretty silly, since we need the locale itself to
* interpret the non-ASCII characters. We can't do much with
* those, so we filter them out.
* Some systems have locale names that don't consist entirely of ASCII
* letters (such as "bokm&aring;l" or "fran&ccedil;ais"). This is pretty
* silly, since we need the locale itself to interpret the non-ASCII
* characters. We can't do much with those, so we filter them out.
*/
if (!pg_is_ascii(locale))
{
@ -687,13 +688,12 @@ create_collation_from_locale(const char *locale, int nspid,
(*nvalidp)++;
/*
* Create a collation named the same as the locale, but quietly
* doing nothing if it already exists. This is the behavior we
* need even at initdb time, because some versions of "locale -a"
* can report the same locale name more than once. And it's
* convenient for later import runs, too, since you just about
* always want to add on new locales without a lot of chatter
* about existing ones.
* Create a collation named the same as the locale, but quietly doing
* nothing if it already exists. This is the behavior we need even at
* initdb time, because some versions of "locale -a" can report the same
* locale name more than once. And it's convenient for later import runs,
* too, since you just about always want to add on new locales without a
* lot of chatter about existing ones.
*/
collid = CollationCreate(locale, nspid, GetUserId(),
COLLPROVIDER_LIBC, true, enc,
@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
param.nvalidp = &nvalid;
/*
* Enumerate the locales that are either installed on or supported
* by the OS.
* Enumerate the locales that are either installed on or supported by
* the OS.
*/
if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
(LPARAM) &param, NULL))

View File

@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
* If we're going to be reading data for the to-be-created database into
* shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an
* AccessExclusiveLock on the database is sufficient to drop all
* of its buffers without worrying about more being read later.
* AccessExclusiveLock on the database is sufficient to drop all of its
* buffers without worrying about more being read later.
*
* Note that we need to do this before entering the
* PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback

View File

@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_TABLE:
case OBJECT_TABLESPACE:
case OBJECT_VIEW:
/*
* These are handled elsewhere, so if someone gets here the code
* is probably wrong or should be revisited.

View File

@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
/*
* The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a
* superuser, the table owner, or the database/schema owner (but in the
* latter case, only if it's not a shared relation). pg_class_aclcheck
* includes the superuser case, and depending on objectKind we already
* know that the user has permission to run REINDEX on this database or
* schema per the permission checks at the beginning of this routine.
* superuser, the table owner, or the database/schema owner (but in
* the latter case, only if it's not a shared relation).
* pg_class_aclcheck includes the superuser case, and depending on
* objectKind we already know that the user has permission to run
* REINDEX on this database or schema per the permission checks at the
* beginning of this routine.
*/
if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&

View File

@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
/*
* We don't want to allow unprivileged users to be able to trigger attempts
* to access arbitrary network destinations, so require the user to have
* been specifically authorized to create subscriptions.
* We don't want to allow unprivileged users to be able to trigger
* attempts to access arbitrary network destinations, so require the user
* to have been specifically authorized to create subscriptions.
*/
if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
ereport(ERROR,
@ -1837,8 +1837,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
* current owner must have CREATE on database
*
* This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
* other object types behave differently (e.g. you can't give a table to
* a user who lacks CREATE privileges on a schema).
* other object types behave differently (e.g. you can't give a table to a
* user who lacks CREATE privileges on a schema).
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
GetUserId(), ACL_CREATE);

View File

@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
*
* The grantor of record for this implicit grant is the bootstrap
* superuser, which means that the CREATEROLE user cannot revoke the
* grant. They can however grant the created role back to themselves
* with different options, since they enjoy ADMIN OPTION on it.
* grant. They can however grant the created role back to themselves with
* different options, since they enjoy ADMIN OPTION on it.
*/
if (!superuser())
{
@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
BOOTSTRAP_SUPERUSERID, &poptself);
/*
* We must make the implicit grant visible to the code below, else
* the additional grants will fail.
* We must make the implicit grant visible to the code below, else the
* additional grants will fail.
*/
CommandCounterIncrement();
@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
* Add the specified members to this new role. adminmembers get the admin
* option, rolemembers don't.
*
* NB: No permissions check is required here. If you have enough rights
* to create a role, you can add any members you like.
* NB: No permissions check is required here. If you have enough rights to
* create a role, you can add any members you like.
*/
AddRoleMems(currentUserId, stmt->role, roleid,
rolemembers, roleSpecsToIds(rolemembers),
@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
shdepLockAndCheckObject(AuthIdRelationId, roleid);
/*
* To mess with a superuser you gotta be superuser; otherwise you
* need CREATEROLE plus admin option on the target role; unless you're
* just trying to change your own settings
* To mess with a superuser you gotta be superuser; otherwise you need
* CREATEROLE plus admin option on the target role; unless you're just
* trying to change your own settings
*/
if (roleform->rolsuper)
{
@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
/*
* Step through all of the granted roles and add, update, or remove
* entries in pg_auth_members as appropriate. If stmt->is_grant is true,
* we are adding new grants or, if they already exist, updating options
* on those grants. If stmt->is_grant is false, we are revoking grants or
* we are adding new grants or, if they already exist, updating options on
* those grants. If stmt->is_grant is false, we are revoking grants or
* removing options from them.
*/
foreach(item, stmt->granted_roles)
@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
ObjectIdGetDatum(grantorId));
/*
* If we found a tuple, update it with new option values, unless
* there are no changes, in which case issue a WARNING.
* If we found a tuple, update it with new option values, unless there
* are no changes, in which case issue a WARNING.
*
* If we didn't find a tuple, just insert one.
*/
@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
/*
* If popt.specified == 0, we're revoking the grant entirely; otherwise,
* we expect just one bit to be set, and we're revoking the corresponding
* option. As of this writing, there's no syntax that would allow for
* an attempt to revoke multiple options at once, and the logic below
* option. As of this writing, there's no syntax that would allow for an
* attempt to revoke multiple options at once, and the logic below
* wouldn't work properly if such syntax were added, so assert that our
* caller isn't trying to do that.
*/
@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source)
void
assign_createrole_self_grant(const char *newval, void *extra)
{
unsigned options = * (unsigned *) extra;
unsigned options = *(unsigned *) extra;
createrole_self_grant_enabled = (options != 0);
createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN

View File

@ -354,8 +354,8 @@ ExecInsertIndexTuples(ResultRelInfo *resultRelInfo,
continue;
/*
* Skip processing of non-summarizing indexes if we only
* update summarizing indexes
* Skip processing of non-summarizing indexes if we only update
* summarizing indexes
*/
if (onlySummarizing && !indexInfo->ii_Summarizing)
continue;

View File

@ -1330,18 +1330,18 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
BufFile *file = *fileptr;
/*
* The batch file is lazily created. If this is the first tuple
* written to this batch, the batch file is created and its buffer is
* allocated in the spillCxt context, NOT in the batchCxt.
* The batch file is lazily created. If this is the first tuple written to
* this batch, the batch file is created and its buffer is allocated in
* the spillCxt context, NOT in the batchCxt.
*
* During the build phase, buffered files are created for inner
* batches. Each batch's buffered file is closed (and its buffer freed)
* after the batch is loaded into memory during the outer side scan.
* Therefore, it is necessary to allocate the batch file buffer in a
* memory context which outlives the batch itself.
* During the build phase, buffered files are created for inner batches.
* Each batch's buffered file is closed (and its buffer freed) after the
* batch is loaded into memory during the outer side scan. Therefore, it
* is necessary to allocate the batch file buffer in a memory context
* which outlives the batch itself.
*
* Also, we use spillCxt instead of hashCxt for a better accounting of
* the spilling memory consumption.
* Also, we use spillCxt instead of hashCxt for a better accounting of the
* spilling memory consumption.
*/
if (file == NULL)
{

View File

@ -799,9 +799,9 @@ llvm_session_initialize(void)
LLVMInitializeNativeAsmParser();
/*
* When targeting an LLVM version with opaque pointers enabled by
* default, turn them off for the context we build our code in. We don't
* need to do so for other contexts (e.g. llvm_ts_context). Once the IR is
* When targeting an LLVM version with opaque pointers enabled by default,
* turn them off for the context we build our code in. We don't need to
* do so for other contexts (e.g. llvm_ts_context). Once the IR is
* generated, it carries the necessary information.
*/
#if LLVM_VERSION_MAJOR > 14
@ -1118,7 +1118,7 @@ llvm_resolve_symbol(const char *symname, void *ctx)
static LLVMErrorRef
llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind,
LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
{

View File

@ -2127,8 +2127,7 @@ llvm_compile_expr(ExprState *state)
/*
* pergroup = &aggstate->all_pergroups
* [op->d.agg_trans.setoff]
* [op->d.agg_trans.transno];
* [op->d.agg_trans.setoff] [op->d.agg_trans.transno];
*/
v_allpergroupsp =
l_load_struct_gep(b, v_aggstatep,

View File

@ -527,8 +527,8 @@ secure_open_gssapi(Port *port)
/*
* Use the configured keytab, if there is one. As we now require MIT
* Kerberos, we might consider using the credential store extensions in the
* future instead of the environment variable.
* Kerberos, we might consider using the credential store extensions in
* the future instead of the environment variable.
*/
if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
{

View File

@ -1104,8 +1104,8 @@ prepare_cert_name(char *name)
if (namelen > MAXLEN)
{
/*
* Keep the end of the name, not the beginning, since the most specific
* field is likely to give users the most information.
* Keep the end of the name, not the beginning, since the most
* specific field is likely to give users the most information.
*/
truncated = name + namelen - MAXLEN;
truncated[0] = truncated[1] = truncated[2] = '.';
@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx)
/*
* Get the Subject and Issuer for logging, but don't let maliciously
* huge certs flood the logs, and don't reflect non-ASCII bytes into it
* either.
* huge certs flood the logs, and don't reflect non-ASCII bytes into
* it either.
*/
subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
sub_prepared = prepare_cert_name(subject);

View File

@ -2693,8 +2693,9 @@ load_hba(void)
if (!ok)
{
/*
* File contained one or more errors, so bail out. MemoryContextDelete
* is enough to clean up everything, including regexes.
* File contained one or more errors, so bail out.
* MemoryContextDelete is enough to clean up everything, including
* regexes.
*/
MemoryContextDelete(hbacxt);
return false;
@ -3056,8 +3057,9 @@ load_ident(void)
if (!ok)
{
/*
* File contained one or more errors, so bail out. MemoryContextDelete
* is enough to clean up everything, including regexes.
* File contained one or more errors, so bail out.
* MemoryContextDelete is enough to clean up everything, including
* regexes.
*/
MemoryContextDelete(ident_context);
return false;

View File

@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt)
/*
* Set up the MERGE target table. The target table is added to the
* namespace below and to joinlist in transform_MERGE_to_join, so don't
* do it here.
* namespace below and to joinlist in transform_MERGE_to_join, so don't do
* it here.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relation,
stmt->relation->inh,

View File

@ -2340,9 +2340,9 @@ merge_default_partitions(PartitionMap *outer_map,
/*
* The default partitions have to be joined with each other, so merge
* them. Note that each of the default partitions isn't merged yet
* (see, process_outer_partition()/process_inner_partition()), so
* they should be merged successfully. The merged partition will act
* as the default partition of the join relation.
* (see, process_outer_partition()/process_inner_partition()), so they
* should be merged successfully. The merged partition will act as
* the default partition of the join relation.
*/
Assert(outer_merged_index == -1);
Assert(inner_merged_index == -1);

View File

@ -58,8 +58,8 @@ fork_process(void)
/*
* We start postmaster children with signals blocked. This allows them to
* install their own handlers before unblocking, to avoid races where they
* might run the postmaster's handler and miss an important control signal.
* With more analysis this could potentially be relaxed.
* might run the postmaster's handler and miss an important control
* signal. With more analysis this could potentially be relaxed.
*/
sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
result = fork();

View File

@ -759,6 +759,7 @@ lexescape(struct vars *v)
RETV(PLAIN, c);
break;
default:
/*
* Throw an error for unrecognized ASCII alpha escape sequences,
* which reserves them for future use if needed.

View File

@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical
* slot may creep in while the wal_level is being
* reduced. Hence this extra check.
* slot may creep in while the wal_level is being reduced.
* Hence this extra check.
*/
if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{

View File

@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin,
MemoryContext old_context;
/*
* On a standby, this check is also required while creating the
* slot. Check the comments in the function.
* On a standby, this check is also required while creating the slot.
* Check the comments in the function.
*/
CheckLogicalDecodingRequirements();

View File

@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
ReorderBufferTXN *txn;
/*
* Bail out if logical_replication_mode is buffered and we haven't exceeded
* the memory limit.
* Bail out if logical_replication_mode is buffered and we haven't
* exceeded the memory limit.
*/
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L)
@ -4010,10 +4010,10 @@ ReorderBufferStreamTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
* After that we need to reuse the snapshot from the previous run.
*
* Unlike DecodeCommit which adds xids of all the subtransactions in
* snapshot's xip array via SnapBuildCommitTxn, we can't do that here
* but we do add them to subxip array instead via ReorderBufferCopySnap.
* This allows the catalog changes made in subtransactions decoded till
* now to be visible.
* snapshot's xip array via SnapBuildCommitTxn, we can't do that here but
* we do add them to subxip array instead via ReorderBufferCopySnap. This
* allows the catalog changes made in subtransactions decoded till now to
* be visible.
*/
if (txn->snapshot_now == NULL)
{

View File

@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
*/
/*
* xl_running_xacts record is older than what we can use, we might not have
* all necessary catalog rows anymore.
* xl_running_xacts record is older than what we can use, we might not
* have all necessary catalog rows anymore.
*/
if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid,

View File

@ -5080,10 +5080,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
* If we are processing this transaction using a parallel apply worker then
* either we send the changes to the parallel worker or if the worker is busy
* then serialize the changes to the file which will later be processed by
* the parallel worker.
* If we are processing this transaction using a parallel apply worker
* then either we send the changes to the parallel worker or if the worker
* is busy then serialize the changes to the file which will later be
* processed by the parallel worker.
*/
*winfo = pa_find_worker(xid);
@ -5097,9 +5097,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
}
/*
* If there is no parallel worker involved to process this transaction then
* we either directly apply the change or serialize it to a file which will
* later be applied when the transaction finish message is processed.
* If there is no parallel worker involved to process this transaction
* then we either directly apply the change or serialize it to a file
* which will later be applied when the transaction finish message is
* processed.
*/
else if (in_streamed_transaction)
{

View File

@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications,
* are multiple lists (one for each operation) to which row filters will
* be appended.
*
* FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
* filter expression" so it takes precedence.
* FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
* expression" so it takes precedence.
*/
foreach(lc, publications)
{

View File

@ -48,8 +48,7 @@ our @languages = qw(
our %ascii_languages = (
'hindi' => 'english',
'russian' => 'english',
);
'russian' => 'english',);
GetOptions(
'depfile' => \$depfile,

View File

@ -98,8 +98,7 @@ struct BufFile
/*
* XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid
* wasting per-file alignment padding when some users create many
* files.
* wasting per-file alignment padding when some users create many files.
*/
PGAlignedBlock buffer;
};

View File

@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size)
/*
* Block all blockable signals, except SIGQUIT. posix_fallocate() can run
* for quite a long time, and is an all-or-nothing operation. If we
* allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery
* conflicts), the retry loop might never succeed.
* allowed SIGUSR1 to interrupt us repeatedly (for example, due to
* recovery conflicts), the retry loop might never succeed.
*/
if (IsUnderPostmaster)
sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask);
pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE);
#if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
/*
* On Linux, a shm_open fd is backed by a tmpfs file. If we were to use
* ftruncate, the file would contain a hole. Accessing memory backed by a
@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size)
* SIGBUS later.
*
* We still use a traditional EINTR retry loop to handle SIGCONT.
* posix_fallocate() doesn't restart automatically, and we don't want
* this to fail if you attach a debugger.
* posix_fallocate() doesn't restart automatically, and we don't want this
* to fail if you attach a debugger.
*/
do
{
@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size)
} while (rc == EINTR);
/*
* The caller expects errno to be set, but posix_fallocate() doesn't
* set it. Instead it returns error numbers directly. So set errno,
* even though we'll also return rc to indicate success or failure.
* The caller expects errno to be set, but posix_fallocate() doesn't set
* it. Instead it returns error numbers directly. So set errno, even
* though we'll also return rc to indicate success or failure.
*/
errno = rc;
#else

View File

@ -12,8 +12,7 @@ my $output_path = '.';
my $lastlockidx = -1;
my $continue = "\n";
GetOptions(
'outdir:s' => \$output_path);
GetOptions('outdir:s' => \$output_path);
open my $lwlocknames, '<', $ARGV[0] or die;
@ -71,7 +70,8 @@ printf $h "#define NUM_INDIVIDUAL_LWLOCKS %s\n", $lastlockidx + 1;
close $h;
close $c;
rename($htmp, "$output_path/lwlocknames.h") || die "rename: $htmp to $output_path/lwlocknames.h: $!";
rename($htmp, "$output_path/lwlocknames.h")
|| die "rename: $htmp to $output_path/lwlocknames.h: $!";
rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!";
close $lwlocknames;

View File

@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
dclist_foreach(proc_iter, waitQueue)
{
PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
if (queued_proc == blocked_proc)
break;
data->waiter_pids[data->npids++] = queued_proc->pid;

View File

@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock)
LWLockWaitListLock(lock);
/*
* Remove ourselves from the waitlist, unless we've already been
* removed. The removal happens with the wait list lock held, so there's
* no race in this check.
* Remove ourselves from the waitlist, unless we've already been removed.
* The removal happens with the wait list lock held, so there's no race in
* this check.
*/
on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
if (on_waitlist)

View File

@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
/*
* If we didn't find any possibly unsafe conflicts because every
* uncommitted writable transaction turned out to be doomed, then we
* can "opt out" immediately. See comments above the earlier check for
* PredXact->WritableSxactCount == 0.
* can "opt out" immediately. See comments above the earlier check
* for PredXact->WritableSxactCount == 0.
*/
if (dlist_is_empty(&sxact->possibleUnsafeConflicts))
{
@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
* xmin and purge any transactions which finished before this transaction
* was launched.
*
* For parallel queries in read-only transactions, it might run twice.
* We only release the reference on the first call.
* For parallel queries in read-only transactions, it might run twice. We
* only release the reference on the first call.
*/
needToClear = false;
if ((partiallyReleasing ||

View File

@ -331,7 +331,7 @@ InitProcess(void)
if (!dlist_is_empty(procgloballist))
{
MyProc = (PGPROC*) dlist_pop_head_node(procgloballist);
MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
SpinLockRelease(ProcStructLock);
}
else

View File

@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum,
/*
* Even if we don't want to use fallocate, we can still extend a
* bit more efficiently than writing each 8kB block individually.
* pg_pwrite_zeros() (via FileZero()) uses
* pg_pwritev_with_retry() to avoid multiple writes or needing a
* zeroed buffer for the whole length of the extension.
* pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
* to avoid multiple writes or needing a zeroed buffer for the
* whole length of the extension.
*/
ret = FileZero(v->mdfd_vfd,
seekpos, (off_t) BLCKSZ * numblocks,

View File

@ -214,6 +214,7 @@ $bmap{'f'} = 'false';
my @fmgr_builtin_oid_index;
my $last_builtin_oid = 0;
my $fmgr_count = 0;
foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
{
next if $s->{lang} ne 'internal';

View File

@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p,
/*
* endptr points to the first character _after_ the sequence we recognized
* as a valid floating point number. orig_string points to the original
* input
* string.
* input string.
*/
/* skip leading whitespace */

View File

@ -306,7 +306,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (!item->value.args.left)
chld = pos;
else if (! flattenJsonPathParseItem(buf, &chld, escontext,
else if (!flattenJsonPathParseItem(buf, &chld, escontext,
item->value.args.left,
nestingLevel + argNestingLevel,
insideArraySubscript))
@ -315,7 +315,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (!item->value.args.right)
chld = pos;
else if (! flattenJsonPathParseItem(buf, &chld, escontext,
else if (!flattenJsonPathParseItem(buf, &chld, escontext,
item->value.args.right,
nestingLevel + argNestingLevel,
insideArraySubscript))
@ -338,7 +338,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
item->value.like_regex.patternlen);
appendStringInfoChar(buf, '\0');
if (! flattenJsonPathParseItem(buf, &chld, escontext,
if (!flattenJsonPathParseItem(buf, &chld, escontext,
item->value.like_regex.expr,
nestingLevel,
insideArraySubscript))
@ -360,7 +360,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (!item->value.arg)
chld = pos;
else if (! flattenJsonPathParseItem(buf, &chld, escontext,
else if (!flattenJsonPathParseItem(buf, &chld, escontext,
item->value.arg,
nestingLevel + argNestingLevel,
insideArraySubscript))
@ -405,7 +405,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
int32 topos;
int32 frompos;
if (! flattenJsonPathParseItem(buf, &frompos, escontext,
if (!flattenJsonPathParseItem(buf, &frompos, escontext,
item->value.array.elems[i].from,
nestingLevel, true))
return false;
@ -413,7 +413,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (item->value.array.elems[i].to)
{
if (! flattenJsonPathParseItem(buf, &topos, escontext,
if (!flattenJsonPathParseItem(buf, &topos, escontext,
item->value.array.elems[i].to,
nestingLevel, true))
return false;
@ -451,7 +451,7 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
if (item->next)
{
if (! flattenJsonPathParseItem(buf, &chld, escontext,
if (!flattenJsonPathParseItem(buf, &chld, escontext,
item->next, nestingLevel,
insideArraySubscript))
return false;

View File

@ -1794,8 +1794,7 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2,
else
#endif
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
* headers */
if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
ereport(ERROR,
(errmsg("could not compare Unicode strings: %m")));
@ -1826,6 +1825,7 @@ pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale)
{
size_t len1 = strlen(arg1);
size_t len2 = strlen(arg2);
result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale);
}
else
@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len)
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status);
if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
ereport(ERROR,
@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen,
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
status = U_ZERO_ERROR;
ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status);
if (U_FAILURE(status))
@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel)
return pstrdup("en-US-u-va-posix");
/*
* A BCP47 language tag doesn't have a clearly-defined upper limit
* (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
* A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
* RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/

View File

@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS)
}
else
{
Size bsize, rsize;
Size bsize,
rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior
* must be preserved.
* character in the hash, but it was done before and the behavior must
* be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
}
else
{
Size bsize, rsize;
Size bsize,
rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
/*
* In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior
* must be preserved.
* character in the hash, but it was done before and the behavior must
* be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));

View File

@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
memcpy(sss->buf1, authoritative_data, len);
/*
* pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated
* strings.
* pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings.
*/
sss->buf1[len] = '\0';
sss->last_len1 = len;

View File

@ -3630,7 +3630,7 @@ get_publication_name(Oid pubid, bool missing_ok)
* return InvalidOid.
*/
Oid
get_subscription_oid(const char* subname, bool missing_ok)
get_subscription_oid(const char *subname, bool missing_ok)
{
Oid oid;
@ -3653,7 +3653,7 @@ char *
get_subscription_name(Oid subid, bool missing_ok)
{
HeapTuple tup;
char* subname;
char *subname;
Form_pg_subscription subform;
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));

View File

@ -3769,8 +3769,8 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
/*
* During a binary upgrade, we use this code path to ensure that
* pg_largeobject and its index have the same relfilenumbers as in
* the old cluster. This is necessary because pg_upgrade treats
* pg_largeobject and its index have the same relfilenumbers as in the
* old cluster. This is necessary because pg_upgrade treats
* pg_largeobject like a user table, not a system table. It is however
* possible that a table or index may need to end up with the same
* relfilenumber in the new cluster as what it had in the old cluster.
@ -5314,8 +5314,8 @@ restart:
* when the column value changes, thus require a separate
* attribute bitmapset.
*
* Obviously, non-key columns couldn't be referenced by
* foreign key or identity key. Hence we do not include them into
* Obviously, non-key columns couldn't be referenced by foreign
* key or identity key. Hence we do not include them into
* uindexattrs, pkindexattrs and idindexattrs bitmaps.
*/
if (attrnum != 0)

View File

@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel)
/*
* Open the target file.
*
* Because Windows isn't happy about the idea of renaming over a file
* that someone has open, we only open this file after acquiring the lock,
* and for the same reason, we close it before releasing the lock. That
* way, by the time write_relmap_file() acquires an exclusive lock, no
* one else will have it open.
* Because Windows isn't happy about the idea of renaming over a file that
* someone has open, we only open this file after acquiring the lock, and
* for the same reason, we close it before releasing the lock. That way,
* by the time write_relmap_file() acquires an exclusive lock, no one else
* will have it open.
*/
snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath,
RELMAPPER_FILENAME);

View File

@ -9,8 +9,7 @@ use Getopt::Long;
my $outfile = '';
GetOptions(
'outfile=s' => \$outfile) or die "$0: wrong arguments";
GetOptions('outfile=s' => \$outfile) or die "$0: wrong arguments";
open my $errcodes, '<', $ARGV[0]
or die "$0: could not open input file '$ARGV[0]': $!\n";

View File

@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid,
}
/*
* The last few connection slots are reserved for superusers and roles with
* privileges of pg_use_reserved_connections. Replication connections are
* drawn from slots reserved with max_wal_senders and are not limited by
* max_connections, superuser_reserved_connections, or
* The last few connection slots are reserved for superusers and roles
* with privileges of pg_use_reserved_connections. Replication
* connections are drawn from slots reserved with max_wal_senders and are
* not limited by max_connections, superuser_reserved_connections, or
* reserved_connections.
*
* Note: At this point, the new backend has already claimed a proc struct,

View File

@ -67,9 +67,9 @@ SwitchToUntrustedUser(Oid userid, UserContext *context)
* This user can SET ROLE to the target user, but not the other way
* around, so protect ourselves against the target user by setting
* SECURITY_RESTRICTED_OPERATION to prevent certain changes to the
* session state. Also set up a new GUC nest level, so that we can roll
* back any GUC changes that may be made by code running as the target
* user, inasmuch as they could be malicious.
* session state. Also set up a new GUC nest level, so that we can
* roll back any GUC changes that may be made by code running as the
* target user, inasmuch as they could be malicious.
*/
sec_context |= SECURITY_RESTRICTED_OPERATION;
SetUserIdAndSecContext(userid, sec_context);

View File

@ -607,8 +607,10 @@ sub print_radix_table
# Print the next line's worth of values.
# XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i;
for (my $j = 0;
$j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
for (
my $j = 0;
$j < $vals_per_line && $i <= $seg->{max_idx};
$j++)
{
# missing values represent zero.
my $val = $seg->{values}->{$i} || 0;

View File

@ -1470,8 +1470,8 @@ check_GUC_init(struct config_generic *gconf)
/* Flag combinations */
/*
* GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part
* of SHOW ALL should not be hidden in postgresql.conf.sample.
* GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of
* SHOW ALL should not be hidden in postgresql.conf.sample.
*/
if ((gconf->flags & GUC_NO_SHOW_ALL) &&
!(gconf->flags & GUC_NOT_IN_SAMPLE))

View File

@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children,
*
* We don't buffer the information about all memory contexts in a
* backend into StringInfo and log it as one message. That would
* require the buffer to be enlarged, risking an OOM as there could
* be a large number of memory contexts in a backend. Instead, we
* log one message per memory context.
* require the buffer to be enlarged, risking an OOM as there could be
* a large number of memory contexts in a backend. Instead, we log
* one message per memory context.
*/
ereport(LOG_SERVER_ONLY,
(errhidestmt(true),


@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
* in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array.
* Note that sort_bounded_heap sets the correct state->status.
* have to transform the heap to a properly-sorted array. Note
* that sort_bounded_heap sets the correct state->status.
*/
sort_bounded_heap(state);
state->current = 0;


@ -1565,8 +1565,8 @@ static void
setup_auth(FILE *cmdfd)
{
/*
* The authid table shouldn't be readable except through views, to
* ensure passwords are not publicly visible.
* The authid table shouldn't be readable except through views, to ensure
* passwords are not publicly visible.
*/
PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n");
@ -1957,9 +1957,9 @@ make_template0(FILE *cmdfd)
" STRATEGY = file_copy;\n\n");
/*
* template0 shouldn't have any collation-dependent objects, so unset
* the collation version. This disables collation version checks when
* making a new database from it.
* template0 shouldn't have any collation-dependent objects, so unset the
* collation version. This disables collation version checks when making
* a new database from it.
*/
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n");
@ -1969,9 +1969,8 @@ make_template0(FILE *cmdfd)
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n");
/*
* Explicitly revoke public create-schema and create-temp-table
* privileges in template1 and template0; else the latter would be on
* by default
* Explicitly revoke public create-schema and create-temp-table privileges
* in template1 and template0; else the latter would be on by default
*/
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n");
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n");
@ -2264,8 +2263,8 @@ icu_language_tag(const char *loc_str)
return pstrdup("en-US-u-va-posix");
/*
* A BCP47 language tag doesn't have a clearly-defined upper limit
* (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
* A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
* RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
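The loop that comment calls for is the usual ICU preflight-and-retry pattern: call once, and if ICU reports a larger length than the buffer holds, grow the buffer and call again. A hedged, self-contained sketch (buffer sizing and error handling here are illustrative assumptions, not the actual initdb code):

#include <stdlib.h>
#include <unicode/uloc.h>

/* Sketch: retry uloc_toLanguageTag() until the reported length fits. */
static char *
language_tag(const char *loc_str)
{
	UErrorCode	status = U_ZERO_ERROR;
	int32_t		buflen = 32;
	char	   *langtag = malloc(buflen);
	int32_t		len;

	len = uloc_toLanguageTag(loc_str, langtag, buflen, 1 /* strict */ , &status);
	while (status == U_BUFFER_OVERFLOW_ERROR ||
		   (U_SUCCESS(status) && len >= buflen))
	{
		buflen = len + 1;		/* grow to the length ICU reported */
		langtag = realloc(langtag, buflen);
		status = U_ZERO_ERROR;
		len = uloc_toLanguageTag(loc_str, langtag, buflen, 1 /* strict */ , &status);
	}
	return U_SUCCESS(status) ? langtag : NULL;
}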


@ -132,8 +132,8 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like(
[
'initdb', '--no-sync',
'--locale-provider=icu',
'--icu-locale=nonsense-nowhere', "$tempdir/dataX"
'--locale-provider=icu', '--icu-locale=nonsense-nowhere',
"$tempdir/dataX"
],
qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/,
'fails for nonsense language');
@ -141,8 +141,8 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like(
[
'initdb', '--no-sync',
'--locale-provider=icu',
'--icu-locale=@colNumeric=lower', "$tempdir/dataX"
'--locale-provider=icu', '--icu-locale=@colNumeric=lower',
"$tempdir/dataX"
],
qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/,
'fails for invalid collation argument');


@ -369,8 +369,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
$node->clean_node;
plan skip_all =>
sprintf(
"Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", $tupidx,
0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
"Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
$tupidx, 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
exit;
}


@ -57,8 +57,10 @@ command_fails_like(
{
# like command_like but checking stderr
my $stderr;
my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
$walfiles[2] ], '2>', \$stderr;
my $result =
IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
$walfiles[2] ],
'2>', \$stderr;
ok($result, "pg_archivecleanup dry run: exit code 0");
like(
$stderr,


@ -341,18 +341,18 @@ tablespace_list_append(const char *arg)
/*
* All tablespaces are created with absolute directories, so specifying a
* non-absolute path here would just never match, possibly confusing users.
* Since we don't know whether the remote side is Windows or not, and it
* might be different than the local side, permit any path that could be
* absolute under either set of rules.
* non-absolute path here would just never match, possibly confusing
* users. Since we don't know whether the remote side is Windows or not,
* and it might be different than the local side, permit any path that
* could be absolute under either set of rules.
*
* (There is little practical risk of confusion here, because someone
* running entirely on Linux isn't likely to have a relative path that
* begins with a backslash or something that looks like a drive
* specification. If they do, and they also incorrectly believe that
* a relative path is acceptable here, we'll silently fail to warn them
* of their mistake, and the -T option will just not get applied, same
* as if they'd specified -T for a nonexistent tablespace.)
* specification. If they do, and they also incorrectly believe that a
* relative path is acceptable here, we'll silently fail to warn them of
* their mistake, and the -T option will just not get applied, same as if
* they'd specified -T for a nonexistent tablespace.)
*/
if (!is_nonwindows_absolute_path(cell->old_dir) &&
!is_windows_absolute_path(cell->old_dir))
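The "absolute under either set of rules" idea above reduces to two small predicates, one per platform convention. A simplified sketch of what they might look like (the real helpers live alongside tablespace_list_append and are somewhat more careful):

#include <ctype.h>
#include <stdbool.h>

/* Absolute by Unix rules: begins with a slash. */
static bool
is_nonwindows_absolute_path(const char *path)
{
	return path[0] == '/';
}

/* Absolute by Windows rules: a drive specification such as "C:\dir" or
 * "C:/dir", or a leading slash or backslash. */
static bool
is_windows_absolute_path(const char *path)
{
	if (isalpha((unsigned char) path[0]) && path[1] == ':' &&
		(path[2] == '\\' || path[2] == '/'))
		return true;
	return path[0] == '\\' || path[0] == '/';
}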


@ -144,8 +144,7 @@ SKIP:
'gzip:long',
'invalid compression specification: compression algorithm "gzip" does not support long-distance mode',
'failure on long mode for gzip'
],
);
],);
for my $cft (@compression_failure_tests)
{
@ -923,7 +922,8 @@ $sigchld_bb->finish();
# Test that we can back up an in-place tablespace
$node->safe_psql('postgres',
"SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';");
"SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"
);
$node->safe_psql('postgres',
"CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
. "INSERT INTO test2 VALUES (1234);");


@ -19,11 +19,12 @@ typedef struct
WalWriteMethod *wwmethod;
off_t currpos;
char *pathname;
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
* Each WalWriteMethod is expected to embed this as the first member of
* a larger struct with method-specific fields following.
* Each WalWriteMethod is expected to embed this as the first member of a
* larger struct with method-specific fields following.
*/
} Walfile;
@ -107,11 +108,12 @@ struct WalWriteMethod
bool sync;
const char *lasterrstring; /* if set, takes precedence over lasterrno */
int lasterrno;
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
* Each WalWriteMethod is expected to embed this as the first member of
* a larger struct with method-specific fields following.
* Each WalWriteMethod is expected to embed this as the first member of a
* larger struct with method-specific fields following.
*/
};
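Both of these comments describe the same C idiom: pseudo-inheritance by embedding the shared struct as the first member, so a pointer to the specialized struct is also usable as a pointer to the generic one. A compilable toy example (TarWalfile and its field are invented for illustration):

#include <stdio.h>

typedef struct Walfile
{
	long		currpos;
	char	   *pathname;
	/* MORE DATA FOLLOWS AT END OF STRUCT */
} Walfile;

typedef struct TarWalfile
{
	Walfile		base;			/* must be the first member */
	int			tar_blocks;		/* method-specific field */
} TarWalfile;

int
main(void)
{
	TarWalfile	f = {{0, "000000010000000000000001"}, 16};
	Walfile    *generic = (Walfile *) &f;	/* valid because base is first */

	printf("%s at %ld\n", generic->pathname, generic->currpos);
	return 0;
}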


@ -651,8 +651,8 @@ LZ4Stream_gets(char *ptr, int size, CompressFileHandle *CFH)
return NULL;
/*
* Our caller expects the return string to be NULL terminated
* and we know that ret is greater than zero.
* Our caller expects the return string to be NULL terminated and we know
* that ret is greater than zero.
*/
ptr[ret - 1] = '\0';


@ -387,6 +387,7 @@ RestoreArchive(Archive *AHX)
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
{
char *errmsg = supports_compression(AH->compression_spec);
if (errmsg)
pg_fatal("cannot restore from compressed archive (%s)",
errmsg);
@ -2985,11 +2986,11 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
if (!te->hadDumper)
{
/*
* Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
* it is considered a data entry. We don't need to check for the
* BLOBS entry or old-style BLOB COMMENTS, because they will have
* hadDumper = true ... but we do need to check new-style BLOB ACLs,
* comments, etc.
* Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
* is considered a data entry. We don't need to check for the BLOBS
* entry or old-style BLOB COMMENTS, because they will have hadDumper
* = true ... but we do need to check new-style BLOB ACLs, comments,
* etc.
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
strcmp(te->desc, "BLOB") == 0 ||
@ -3480,6 +3481,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te)
{
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
}
/*
* These object types require additional decoration. Fortunately, the
* information needed is exactly what's in the DROP command.
@ -3639,6 +3641,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
initPQExpBuffer(&temp);
_getObjectDescription(&temp, te);
/*
* If _getObjectDescription() didn't fill the buffer, then there is no
* owner.


@ -684,10 +684,10 @@ _LoadLOs(ArchiveHandle *AH)
tarClose(AH, th);
/*
* Once we have found the first LO, stop at the first non-LO
* entry (which will be 'blobs.toc'). This coding would eat all
* the rest of the archive if there are no LOs ... but this
* function shouldn't be called at all in that case.
* Once we have found the first LO, stop at the first non-LO entry
* (which will be 'blobs.toc'). This coding would eat all the
* rest of the archive if there are no LOs ... but this function
* shouldn't be called at all in that case.
*/
if (foundLO)
break;


@ -756,9 +756,9 @@ main(int argc, char **argv)
pg_fatal("%s", error_detail);
/*
* Disable support for zstd workers for now - these are based on threading,
* and it's unclear how it interacts with parallel dumps on platforms where
* that relies on threads too (e.g. Windows).
* Disable support for zstd workers for now - these are based on
* threading, and it's unclear how it interacts with parallel dumps on
* platforms where that relies on threads too (e.g. Windows).
*/
if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
@ -879,8 +879,8 @@ main(int argc, char **argv)
/*
* Dumping LOs is the default for dumps where an inclusion switch is not
* used (an "include everything" dump). -B can be used to exclude LOs
* from those dumps. -b can be used to include LOs even when an
* inclusion switch is used.
* from those dumps. -b can be used to include LOs even when an inclusion
* switch is used.
*
* -s means "schema only" and LOs are data, not schema, so we never
* include LOs when -s is used.
@ -915,8 +915,8 @@ main(int argc, char **argv)
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
* However, we do need to collect LO information as there may be
* comments or other information on LOs that we do need to dump out.
* However, we do need to collect LO information as there may be comments
* or other information on LOs that we do need to dump out.
*/
if (dopt.outputLOs || dopt.binary_upgrade)
getLOs(fout);
@ -3590,8 +3590,8 @@ getLOs(Archive *fout)
loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/*
* In binary-upgrade mode for LOs, we do *not* dump out the LO
* data, as it will be copied by pg_upgrade, which simply copies the
* In binary-upgrade mode for LOs, we do *not* dump out the LO data,
* as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored.
@ -14828,7 +14828,10 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
if (dopt->no_security_labels)
return;
/* Security labels are schema not data ... except large object labels are data */
/*
* Security labels are schema not data ... except large object labels are
* data
*/
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
@ -16632,10 +16635,12 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
{
appendPQExpBufferStr(q,
coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
/*
* PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
* indexes. Being able to create this was fixed, but we need to
* make the index distinct in order to be able to restore the dump.
* make the index distinct in order to be able to restore the
* dump.
*/
if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
appendPQExpBufferStr(q, " NULLS NOT DISTINCT");


@ -996,8 +996,8 @@ dumpRoleMembership(PGconn *conn)
/*
* We can't dump these GRANT commands in arbitrary order, because a role
* that is named as a grantor must already have ADMIN OPTION on the
* role for which it is granting permissions, except for the bootstrap
* that is named as a grantor must already have ADMIN OPTION on the role
* for which it is granting permissions, except for the bootstrap
* superuser, who can always be named as the grantor.
*
* We handle this by considering these grants role by role. For each role,
@ -1005,8 +1005,8 @@ dumpRoleMembership(PGconn *conn)
* superuser. Every time we grant ADMIN OPTION on the role to some user,
* that user also becomes an allowable grantor. We make repeated passes
* over the grants for the role, each time dumping those whose grantors
* are allowable and which we haven't done yet. Eventually this should
* let us dump all the grants.
* are allowable and which we haven't done yet. Eventually this should let
* us dump all the grants.
*/
total = PQntuples(res);
while (start < total)
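The ordering rule in that comment is a small fixed-point computation: start with the bootstrap superuser as the only allowable grantor, emit whatever grants currently have an allowable grantor, and let every ADMIN OPTION grant enlarge the allowable set. A toy model of the repeated passes (hypothetical data; the real code drives this from pg_auth_members query results):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
	const char *grantee;
	const char *grantor;
	bool		admin_option;
	bool		dumped;
} Grant;

static bool
is_allowable(const char **allow, int nallow, const char *role)
{
	for (int i = 0; i < nallow; i++)
		if (strcmp(allow[i], role) == 0)
			return true;
	return false;
}

int
main(void)
{
	/* grants on one role, in arbitrary order: carol's depends on bob's */
	Grant		grants[] = {
		{"carol", "bob", false, false},
		{"bob", "postgres", true, false},
	};
	const char *allow[8] = {"postgres"};	/* bootstrap superuser */
	int			nallow = 1;
	int			ndumped = 0;
	int			total = 2;

	while (ndumped < total)		/* repeated passes, as the comment says */
	{
		for (int i = 0; i < total; i++)
		{
			Grant	   *g = &grants[i];

			if (g->dumped || !is_allowable(allow, nallow, g->grantor))
				continue;
			printf("GRANT ... TO %s GRANTED BY %s;\n", g->grantee, g->grantor);
			g->dumped = true;
			ndumped++;
			if (g->admin_option)
				allow[nallow++] = g->grantee;	/* now an allowable grantor */
		}
	}
	return 0;
}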


@ -156,10 +156,8 @@ my %pgdump_runs = (
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
command => [
'pg_restore',
'-l', "$tempdir/compression_lz4_custom.dump",
],
command =>
[ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
expected => qr/Compression: lz4/,
name => 'data content is lz4 compressed'
},
@ -229,8 +227,7 @@ my %pgdump_runs = (
],
command_like => {
command => [
'pg_restore',
'-l', "$tempdir/compression_zstd_custom.dump",
'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
],
expected => qr/Compression: zstd/,
name => 'data content is zstd compressed'
@ -250,8 +247,8 @@ my %pgdump_runs = (
compress_cmd => {
program => $ENV{'ZSTD'},
args => [
'-z', '-f', '--rm',
"$tempdir/compression_zstd_dir/blobs.toc",
'-z', '-f',
'--rm', "$tempdir/compression_zstd_dir/blobs.toc",
"-o", "$tempdir/compression_zstd_dir/blobs.toc.zst",
],
},
@ -280,8 +277,8 @@ my %pgdump_runs = (
program => $ENV{'ZSTD'},
args => [
'-d', '-f',
"$tempdir/compression_zstd_plain.sql.zst",
"-o", "$tempdir/compression_zstd_plain.sql",
"$tempdir/compression_zstd_plain.sql.zst", "-o",
"$tempdir/compression_zstd_plain.sql",
],
},
},
@ -385,9 +382,9 @@ my %pgdump_runs = (
command_like => {
command =>
[ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
expected => $supports_gzip ?
qr/Compression: gzip/ :
qr/Compression: none/,
expected => $supports_gzip
? qr/Compression: gzip/
: qr/Compression: none/,
name => 'data content is gzip-compressed by default if available',
},
},
@ -410,17 +407,15 @@ my %pgdump_runs = (
command_like => {
command =>
[ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
expected => $supports_gzip ?
qr/Compression: gzip/ :
qr/Compression: none/,
expected => $supports_gzip ? qr/Compression: gzip/
: qr/Compression: none/,
name => 'data content is gzip-compressed by default',
},
glob_patterns => [
"$tempdir/defaults_dir_format/toc.dat",
"$tempdir/defaults_dir_format/blobs.toc",
$supports_gzip ?
"$tempdir/defaults_dir_format/*.dat.gz" :
"$tempdir/defaults_dir_format/*.dat",
$supports_gzip ? "$tempdir/defaults_dir_format/*.dat.gz"
: "$tempdir/defaults_dir_format/*.dat",
],
},
@ -468,7 +463,8 @@ my %pgdump_runs = (
},
exclude_measurement => {
dump_cmd => [
'pg_dump', '--no-sync',
'pg_dump',
'--no-sync',
"--file=$tempdir/exclude_measurement.sql",
'--exclude-table-and-children=dump_test.measurement',
'postgres',
@ -534,9 +530,8 @@ my %pgdump_runs = (
},
no_large_objects => {
dump_cmd => [
'pg_dump', '--no-sync',
"--file=$tempdir/no_large_objects.sql", '-B',
'postgres',
'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
'-B', 'postgres',
],
},
no_privs => {
@ -1339,8 +1334,7 @@ my %tests = (
},
'LO create (with no data)' => {
create_sql =>
'SELECT pg_catalog.lo_create(0);',
create_sql => 'SELECT pg_catalog.lo_create(0);',
regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lo_close(0);\E
@ -1933,7 +1927,8 @@ my %tests = (
'CREATE COLLATION icu_collation' => {
create_order => 76,
create_sql => "CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');",
create_sql =>
"CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');",
regexp =>
qr/CREATE COLLATION public.icu_collation \(provider = icu, locale = 'en-US-u-va-posix'(, version = '[^']*')?\);/m,
icu => 1,
@ -3119,9 +3114,7 @@ my %tests = (
\Q);\E
/xm,
like => {
%full_runs,
%dump_test_schema_runs,
section_pre_data => 1,
%full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
@ -3290,7 +3283,8 @@ my %tests = (
\QEXECUTE FUNCTION dump_test.trigger_func();\E
/xm,
like => {
%full_runs, %dump_test_schema_runs, section_post_data => 1,
%full_runs, %dump_test_schema_runs,
section_post_data => 1,
only_dump_measurement => 1,
},
unlike => {
@ -3301,7 +3295,8 @@ my %tests = (
'COPY measurement' => {
create_order => 93,
create_sql => 'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) '
create_sql =>
'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) '
. "VALUES (1, '2006-02-12', 35, 1);",
regexp => qr/^
\QCOPY dump_test_second_schema.measurement_y2006m2 (city_id, logdate, peaktemp, unitsales) FROM stdin;\E
@ -4768,12 +4763,16 @@ foreach my $run (sort keys %pgdump_runs)
my $run_db = 'postgres';
# Skip command-level tests for gzip/lz4/zstd if the tool is not supported
if ($pgdump_runs{$run}->{compile_option} &&
(($pgdump_runs{$run}->{compile_option} eq 'gzip' && !$supports_gzip) ||
($pgdump_runs{$run}->{compile_option} eq 'lz4' && !$supports_lz4) ||
($pgdump_runs{$run}->{compile_option} eq 'zstd' && !$supports_zstd)))
if ($pgdump_runs{$run}->{compile_option}
&& (($pgdump_runs{$run}->{compile_option} eq 'gzip'
&& !$supports_gzip)
|| ($pgdump_runs{$run}->{compile_option} eq 'lz4'
&& !$supports_lz4)
|| ($pgdump_runs{$run}->{compile_option} eq 'zstd'
&& !$supports_zstd)))
{
note "$run: skipped due to no $pgdump_runs{$run}->{compile_option} support";
note
"$run: skipped due to no $pgdump_runs{$run}->{compile_option} support";
next;
}
@ -4800,16 +4799,18 @@ foreach my $run (sort keys %pgdump_runs)
foreach my $glob_pattern (@{$glob_patterns})
{
my @glob_output = glob($glob_pattern);
is(scalar(@glob_output) > 0, 1, "$run: glob check for $glob_pattern");
is(scalar(@glob_output) > 0,
1, "$run: glob check for $glob_pattern");
}
}
if ($pgdump_runs{$run}->{command_like})
{
my $cmd_like = $pgdump_runs{$run}->{command_like};
$node->command_like(\@{ $cmd_like->{command} },
$node->command_like(
\@{ $cmd_like->{command} },
$cmd_like->{expected},
"$run: " . $cmd_like->{name})
"$run: " . $cmd_like->{name});
}
if ($pgdump_runs{$run}->{restore_cmd})


@ -105,8 +105,8 @@ check_and_dump_old_cluster(bool live_check)
check_for_isn_and_int8_passing_mismatch(&old_cluster);
/*
* PG 16 increased the size of the 'aclitem' type, which breaks the on-disk
* format for existing data.
* PG 16 increased the size of the 'aclitem' type, which breaks the
* on-disk format for existing data.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500)
check_for_aclitem_data_type_usage(&old_cluster);


@ -138,11 +138,12 @@ $oldnode->start;
my $result;
$result = $oldnode->safe_psql(
'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
'postgres',
"SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
FROM pg_database WHERE datname='template0'");
is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
"check locales in original cluster"
);
is( $result,
"$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
"check locales in original cluster");
# The default location of the source code is the root of this directory.
my $srcdir = abs_path("../../..");
@ -337,8 +338,7 @@ command_fails(
'-s', $newnode->host,
'-p', $oldnode->port,
'-P', $newnode->port,
$mode,
'--check',
$mode, '--check',
],
'run of pg_upgrade --check for new instance with incorrect binary path');
ok(-d $newnode->data_dir . "/pg_upgrade_output.d",
@ -352,8 +352,7 @@ command_ok(
'-D', $newnode->data_dir, '-b', $oldbindir,
'-B', $newbindir, '-s', $newnode->host,
'-p', $oldnode->port, '-P', $newnode->port,
$mode,
'--check',
$mode, '--check',
],
'run of pg_upgrade --check for new instance');
ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
@ -396,11 +395,12 @@ if (-d $log_path)
# Test that upgraded cluster has original locale settings.
$result = $newnode->safe_psql(
'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
'postgres',
"SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
FROM pg_database WHERE datname='template0'");
is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
"check that locales in new cluster match original cluster"
);
is( $result,
"$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
"check that locales in new cluster match original cluster");
# Second dump from the upgraded instance.
@dump_command = (


@ -52,10 +52,10 @@ my @test_configuration = (
},
{
'compression_method' => 'zstd',
'backup_flags' => ['--compress', 'client-zstd:level=1,long'],
'backup_flags' => [ '--compress', 'client-zstd:level=1,long' ],
'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
'decompress_flags' => [ '-d' ],
'decompress_flags' => ['-d'],
'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{


@ -4511,7 +4511,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
/* header line width in expanded mode */
else if (strcmp(param, "xheader_width") == 0)
{
if (! value)
if (!value)
;
else if (pg_strcasecmp(value, "full") == 0)
popt->topt.expanded_header_width_type = PRINT_XHEADER_FULL;
@ -5063,15 +5063,16 @@ pset_value_string(const char *param, printQueryOpt *popt)
else if (strcmp(param, "xheader_width") == 0)
{
if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL)
return(pstrdup("full"));
return pstrdup("full");
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN)
return(pstrdup("column"));
return pstrdup("column");
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE)
return(pstrdup("page"));
return pstrdup("page");
else
{
/* must be PRINT_XHEADER_EXACT_WIDTH */
char wbuff[32];
snprintf(wbuff, sizeof(wbuff), "%d",
popt->topt.expanded_header_exact_width);
return pstrdup(wbuff);


@ -1432,7 +1432,7 @@ ExecQueryAndProcessResults(const char *query,
INSTR_TIME_SET_ZERO(before);
if (pset.bind_flag)
success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char * const *) pset.bind_params, NULL, NULL, 0);
success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char *const *) pset.bind_params, NULL, NULL, 0);
else
success = PQsendQuery(pset.db, query);
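For context, the call being reformatted above is libpq's extended-query-protocol entry point, which psql's \bind feeds. A minimal usage sketch (connection setup elided; send_bound_query is an invented wrapper, not psql code):

#include <libpq-fe.h>

/* Sketch: send "SELECT $1" with one text parameter over the extended
 * query protocol, roughly what \bind arranges for. */
static int
send_bound_query(PGconn *db)
{
	const char *params[] = {"hello"};

	return PQsendQueryParams(db, "SELECT $1",
							 1,		/* one parameter */
							 NULL,	/* let the server infer its type */
							 params,
							 NULL,	/* text values: lengths unused */
							 NULL,	/* all-text parameter formats */
							 0);	/* request text-format results */
}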


@ -96,7 +96,8 @@ typedef struct _psqlSettings
char *gset_prefix; /* one-shot prefix argument for \gset */
bool gdesc_flag; /* one-shot request to describe query result */
bool gexec_flag; /* one-shot request to execute query result */
bool bind_flag; /* one-shot request to use extended query protocol */
bool bind_flag; /* one-shot request to use extended query
* protocol */
int bind_nparams; /* number of parameters */
char **bind_params; /* parameters for extended query protocol call */
bool crosstab_flag; /* one-shot request to crosstab result */


@ -348,16 +348,12 @@ psql_like(
qr/1\|value\|2022-07-04 00:00:00
2|test|2022-07-03 00:00:00
3|test|2022-07-05 00:00:00/,
'\copy from with DEFAULT'
);
'\copy from with DEFAULT');
# Check \watch
# Note: the interval value is parsed with locale-aware strtod()
psql_like(
$node,
sprintf('SELECT 1 \watch c=3 i=%g', 0.01),
qr/1\n1\n1/,
'\watch with 3 iterations');
psql_like($node, sprintf('SELECT 1 \watch c=3 i=%g', 0.01),
qr/1\n1\n1/, '\watch with 3 iterations');
# Check \watch errors
psql_fails_like(


@ -71,7 +71,8 @@ delete $ENV{LS_COLORS};
# completion tests is too variable.
if ($ENV{TESTDATADIR})
{
chdir $ENV{TESTDATADIR} or die "could not chdir to \"$ENV{TESTDATADIR}\": $!";
chdir $ENV{TESTDATADIR}
or die "could not chdir to \"$ENV{TESTDATADIR}\": $!";
}
# Create some junk files for filename completion testing.


@ -40,8 +40,10 @@ if ($ENV{with_icu} eq 'yes')
$node->issues_sql_like(
[
'createdb', '-T',
'template0', '-E', 'UTF8', '--locale-provider=icu',
'--locale=C', '--icu-locale=en', 'foobar5'
'template0', '-E',
'UTF8', '--locale-provider=icu',
'--locale=C', '--icu-locale=en',
'foobar5'
],
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
'create database with ICU locale specified');
@ -65,16 +67,25 @@ if ($ENV{with_icu} eq 'yes')
# additional node, which uses the icu provider
my $node2 = PostgreSQL::Test::Cluster->new('icu');
$node2->init(extra => ['--locale-provider=icu', '--icu-locale=en']);
$node2->init(extra => [ '--locale-provider=icu', '--icu-locale=en' ]);
$node2->start;
$node2->command_ok(
[ 'createdb', '-T', 'template0', '--locale-provider=libc', 'foobar55' ],
'create database with libc provider from template database with icu provider');
[
'createdb', '-T',
'template0', '--locale-provider=libc',
'foobar55'
],
'create database with libc provider from template database with icu provider'
);
$node2->command_ok(
[ 'createdb', '-T', 'template0', '--icu-locale', 'en-US', 'foobar56' ],
'create database with icu locale from template database with icu provider');
[
'createdb', '-T', 'template0', '--icu-locale', 'en-US',
'foobar56'
],
'create database with icu locale from template database with icu provider'
);
}
else
{
@ -163,17 +174,11 @@ $node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ],
qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/,
'create database with owner role_foobar');
($ret, $stdout, $stderr) = $node->psql(
'foobar2',
'DROP OWNED BY role_foobar;',
on_error_die => 1,
);
($ret, $stdout, $stderr) =
$node->psql('foobar2', 'DROP OWNED BY role_foobar;', on_error_die => 1,);
ok($ret == 0, "DROP OWNED BY role_foobar");
($ret, $stdout, $stderr) = $node->psql(
'foobar2',
'DROP DATABASE foobar8;',
on_error_die => 1,
);
($ret, $stdout, $stderr) =
$node->psql('foobar2', 'DROP DATABASE foobar8;', on_error_die => 1,);
ok($ret == 0, "DROP DATABASE foobar8");
done_testing();


@ -53,7 +53,8 @@ my $fetch_toast_relfilenodes =
WHERE b.oid IN ('pg_constraint'::regclass, 'test1'::regclass)};
# Same for relfilenodes of normal indexes. This saves the relfilenode
# from an index of pg_constraint, and from the index of the test table.
my $fetch_index_relfilenodes = qq{SELECT i.indrelid, a.oid::regclass::text, a.oid, a.relfilenode
my $fetch_index_relfilenodes =
qq{SELECT i.indrelid, a.oid::regclass::text, a.oid, a.relfilenode
FROM pg_class a
JOIN pg_index i ON (i.indexrelid = a.oid)
WHERE a.relname IN ('pg_constraint_oid_index', 'test1x')};


@ -18,8 +18,7 @@ use PerfectHash;
my $output_path = '.';
GetOptions(
'outdir:s' => \$output_path);
GetOptions('outdir:s' => \$output_path);
my $output_table_file = "$output_path/unicode_norm_table.h";
my $output_func_file = "$output_path/unicode_norm_hashfunc.h";


@ -1295,10 +1295,11 @@ print_aligned_vertical_line(const printTableOpt *topt,
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth)));
if (opt_border == 1)
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 3)));
/*
* Handling the xheader width for border=2 doesn't make
* much sense because this format has an additional
* right border, but keep this for consistency.
* Handling the xheader width for border=2 doesn't make much
* sense because this format has an additional right border,
* but keep this for consistency.
*/
if (opt_border == 2)
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 7)));


@ -550,6 +550,7 @@ extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup,
/* gistbuild.c */
extern IndexBuildResult *gistbuild(Relation heap, Relation index,
struct IndexInfo *indexInfo);
/* gistbuildbuffers.c */
extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep,
int maxLevel);


@ -332,6 +332,7 @@ extern XLogReaderState *XLogReaderAllocate(int wal_segment_size,
const char *waldir,
XLogReaderRoutine *routine,
void *private_data);
/* Free an XLogReader */
extern void XLogReaderFree(XLogReaderState *state);


@ -537,29 +537,29 @@
# array
{ aggfnoid => 'array_agg(anynonarray)', aggtransfn => 'array_agg_transfn',
aggcombinefn => 'array_agg_combine', aggserialfn => 'array_agg_serialize',
aggdeserialfn => 'array_agg_deserialize', aggfinalfn => 'array_agg_finalfn',
aggfinalextra => 't', aggtranstype => 'internal' },
aggfinalfn => 'array_agg_finalfn', aggcombinefn => 'array_agg_combine',
aggserialfn => 'array_agg_serialize',
aggdeserialfn => 'array_agg_deserialize', aggfinalextra => 't',
aggtranstype => 'internal' },
{ aggfnoid => 'array_agg(anyarray)', aggtransfn => 'array_agg_array_transfn',
aggfinalfn => 'array_agg_array_finalfn',
aggcombinefn => 'array_agg_array_combine',
aggserialfn => 'array_agg_array_serialize',
aggdeserialfn => 'array_agg_array_deserialize',
aggfinalfn => 'array_agg_array_finalfn', aggfinalextra => 't',
aggdeserialfn => 'array_agg_array_deserialize', aggfinalextra => 't',
aggtranstype => 'internal' },
# text
{ aggfnoid => 'string_agg(text,text)', aggtransfn => 'string_agg_transfn',
aggcombinefn => 'string_agg_combine', aggserialfn => 'string_agg_serialize',
aggdeserialfn => 'string_agg_deserialize',
aggfinalfn => 'string_agg_finalfn', aggtranstype => 'internal' },
aggfinalfn => 'string_agg_finalfn', aggcombinefn => 'string_agg_combine',
aggserialfn => 'string_agg_serialize',
aggdeserialfn => 'string_agg_deserialize', aggtranstype => 'internal' },
# bytea
{ aggfnoid => 'string_agg(bytea,bytea)',
aggtransfn => 'bytea_string_agg_transfn',
aggcombinefn => 'string_agg_combine',
aggserialfn => 'string_agg_serialize',
aggdeserialfn => 'string_agg_deserialize',
aggfinalfn => 'bytea_string_agg_finalfn', aggtranstype => 'internal' },
aggfinalfn => 'bytea_string_agg_finalfn',
aggcombinefn => 'string_agg_combine', aggserialfn => 'string_agg_serialize',
aggdeserialfn => 'string_agg_deserialize', aggtranstype => 'internal' },
# range
{ aggfnoid => 'range_intersect_agg(anyrange)',


@ -18,6 +18,7 @@
datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
datallowconn => 't', datconnlimit => '-1', datfrozenxid => '0',
datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE',
datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE', daticurules => 'ICU_RULES', datacl => '_null_' },
datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE',
daticurules => 'ICU_RULES', datacl => '_null_' },
]


@ -1667,8 +1667,9 @@
prorettype => 'internal', proargtypes => 'internal anyarray',
prosrc => 'array_agg_array_transfn' },
{ oid => '6296', descr => 'aggregate combine function',
proname => 'array_agg_array_combine', proisstrict => 'f', prorettype => 'internal',
proargtypes => 'internal internal', prosrc => 'array_agg_array_combine' },
proname => 'array_agg_array_combine', proisstrict => 'f',
prorettype => 'internal', proargtypes => 'internal internal',
prosrc => 'array_agg_array_combine' },
{ oid => '6297', descr => 'aggregate serial function',
proname => 'array_agg_array_serialize', prorettype => 'bytea',
proargtypes => 'internal', prosrc => 'array_agg_array_serialize' },
@ -5481,10 +5482,9 @@
prorettype => 'oid', proargtypes => 'int4',
prosrc => 'pg_stat_get_backend_dbid' },
{ oid => '6107', descr => 'statistics: get subtransaction status of backend',
proname => 'pg_stat_get_backend_subxact', provolatile => 's', proparallel => 'r',
prorettype => 'record', proargtypes => 'int4',
proallargtypes => '{int4,int4,bool}',
proargmodes => '{i,o,o}',
proname => 'pg_stat_get_backend_subxact', provolatile => 's',
proparallel => 'r', prorettype => 'record', proargtypes => 'int4',
proallargtypes => '{int4,int4,bool}', proargmodes => '{i,o,o}',
proargnames => '{bid,subxact_count,subxact_overflowed}',
prosrc => 'pg_stat_get_backend_subxact' },
{ oid => '1939', descr => 'statistics: user ID of backend',
@ -5731,9 +5731,9 @@
prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_buf_alloc' },
{ oid => '6214', descr => 'statistics: per backend type IO statistics',
proname => 'pg_stat_get_io', provolatile => 'v',
prorows => '30', proretset => 't',
proparallel => 'r', prorettype => 'record', proargtypes => '',
proname => 'pg_stat_get_io', prorows => '30', proretset => 't',
provolatile => 'v', proparallel => 'r', prorettype => 'record',
proargtypes => '',
proallargtypes => '{text,text,text,int8,float8,int8,float8,int8,float8,int8,float8,int8,int8,int8,int8,int8,float8,timestamptz}',
proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}',
proargnames => '{backend_type,object,context,reads,read_time,writes,write_time,writebacks,writeback_time,extends,extend_time,op_bytes,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}',
@ -6407,8 +6407,9 @@
proname => 'pg_switch_wal', provolatile => 'v', prorettype => 'pg_lsn',
proargtypes => '', prosrc => 'pg_switch_wal' },
{ oid => '6305', descr => 'log details of the current snapshot to WAL',
proname => 'pg_log_standby_snapshot', provolatile => 'v', prorettype => 'pg_lsn',
proargtypes => '', prosrc => 'pg_log_standby_snapshot' },
proname => 'pg_log_standby_snapshot', provolatile => 'v',
prorettype => 'pg_lsn', proargtypes => '',
prosrc => 'pg_log_standby_snapshot' },
{ oid => '3098', descr => 'create a named restore point',
proname => 'pg_create_restore_point', provolatile => 'v',
prorettype => 'pg_lsn', proargtypes => 'text',
@ -10349,15 +10350,15 @@
proargtypes => 'internal', prosrc => 'window_dense_rank_support' },
{ oid => '3103', descr => 'fractional rank within partition',
proname => 'percent_rank', prosupport => 'window_percent_rank_support',
prokind => 'w', proisstrict => 'f', prorettype => 'float8',
proargtypes => '', prosrc => 'window_percent_rank' },
prokind => 'w', proisstrict => 'f', prorettype => 'float8', proargtypes => '',
prosrc => 'window_percent_rank' },
{ oid => '6306', descr => 'planner support for percent_rank',
proname => 'window_percent_rank_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'window_percent_rank_support' },
{ oid => '3104', descr => 'fractional row number within partition',
proname => 'cume_dist', prosupport => 'window_cume_dist_support',
prokind => 'w', proisstrict => 'f', prorettype => 'float8',
proargtypes => '', prosrc => 'window_cume_dist' },
prokind => 'w', proisstrict => 'f', prorettype => 'float8', proargtypes => '',
prosrc => 'window_cume_dist' },
{ oid => '6307', descr => 'planner support for cume_dist',
proname => 'window_cume_dist_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'window_cume_dist_support' },
@ -11824,7 +11825,8 @@
provariadic => 'text', proretset => 't', provolatile => 's',
prorettype => 'record', proargtypes => '_text',
proallargtypes => '{_text,oid,oid,int2vector,pg_node_tree}',
proargmodes => '{v,o,o,o,o}', proargnames => '{pubname,pubid,relid,attrs,qual}',
proargmodes => '{v,o,o,o,o}',
proargnames => '{pubname,pubid,relid,attrs,qual}',
prosrc => 'pg_get_publication_tables' },
{ oid => '6121',
descr => 'returns whether a relation can be part of a publication',


@ -90,8 +90,8 @@ CATALOG(pg_subscription,6100,SubscriptionRelationId) BKI_SHARED_RELATION BKI_ROW
bool subpasswordrequired; /* Must connection use a password? */
bool subrunasowner; /* True if replication should execute as
* the subscription owner */
bool subrunasowner; /* True if replication should execute as the
* subscription owner */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
/* Connection string to the publisher */


@ -69,9 +69,12 @@ typedef enum printTextLineWrap
typedef enum printXheaderWidthType
{
/* Expanded header line width variants */
PRINT_XHEADER_FULL, /* do not truncate header line (this is the default) */
PRINT_XHEADER_COLUMN, /* only print header line above the first column */
PRINT_XHEADER_PAGE, /* header line must not be longer than terminal width */
PRINT_XHEADER_FULL, /* do not truncate header line (this is the
* default) */
PRINT_XHEADER_COLUMN, /* only print header line above the first
* column */
PRINT_XHEADER_PAGE, /* header line must not be longer than
* terminal width */
PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */
} printXheaderWidthType;
@ -110,8 +113,10 @@ typedef struct printTableOpt
enum printFormat format; /* see enum above */
unsigned short int expanded; /* expanded/vertical output (if supported
* by output format); 0=no, 1=yes, 2=auto */
printXheaderWidthType expanded_header_width_type; /* width type for header line in expanded mode */
int expanded_header_exact_width; /* explicit width for header line in expanded mode */
printXheaderWidthType expanded_header_width_type; /* width type for header
* line in expanded mode */
int expanded_header_exact_width; /* explicit width for header
* line in expanded mode */
unsigned short int border; /* Print a border around the table. 0=none,
* 1=dividing lines, 2=full */
unsigned short int pager; /* use pager for output (if to stdout and


@ -231,6 +231,7 @@ HeapTupleGetDatum(const HeapTupleData *tuple)
{
return HeapTupleHeaderGetDatum(tuple->t_data);
}
/* obsolete version of above */
#define TupleGetDatum(_slot, _tuple) HeapTupleGetDatum(_tuple)

Some files were not shown because too many files have changed in this diff.