Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical.  We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop).  We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up.  Going
forward, that should make for fewer random-seeming changes to existing
code.

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
Author: Tom Lane  2023-05-19 17:24:48 -04:00
Parent: df6b19fbbc
Commit: 0245f8db36
402 changed files with 4756 additions and 4427 deletions

View File

@ -38,30 +38,35 @@ $node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
my $main_h = $node->background_psql('postgres'); my $main_h = $node->background_psql('postgres');
$main_h->query_safe(q( $main_h->query_safe(
q(
BEGIN; BEGIN;
INSERT INTO tbl VALUES(0); INSERT INTO tbl VALUES(0);
)); ));
my $cic_h = $node->background_psql('postgres'); my $cic_h = $node->background_psql('postgres');
$cic_h->query_until(qr/start/, q( $cic_h->query_until(
qr/start/, q(
\echo start \echo start
CREATE INDEX CONCURRENTLY idx ON tbl(i); CREATE INDEX CONCURRENTLY idx ON tbl(i);
)); ));
$main_h->query_safe(q( $main_h->query_safe(
q(
PREPARE TRANSACTION 'a'; PREPARE TRANSACTION 'a';
)); ));
$main_h->query_safe(q( $main_h->query_safe(
q(
BEGIN; BEGIN;
INSERT INTO tbl VALUES(0); INSERT INTO tbl VALUES(0);
)); ));
$node->safe_psql('postgres', q(COMMIT PREPARED 'a';)); $node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
$main_h->query_safe(q( $main_h->query_safe(
q(
PREPARE TRANSACTION 'b'; PREPARE TRANSACTION 'b';
BEGIN; BEGIN;
INSERT INTO tbl VALUES(0); INSERT INTO tbl VALUES(0);
@ -69,7 +74,8 @@ INSERT INTO tbl VALUES(0);
$node->safe_psql('postgres', q(COMMIT PREPARED 'b';)); $node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
$main_h->query_safe(q( $main_h->query_safe(
q(
PREPARE TRANSACTION 'c'; PREPARE TRANSACTION 'c';
COMMIT PREPARED 'c'; COMMIT PREPARED 'c';
)); ));
@ -97,7 +103,8 @@ PREPARE TRANSACTION 'persists_forever';
$node->restart; $node->restart;
my $reindex_h = $node->background_psql('postgres'); my $reindex_h = $node->background_psql('postgres');
$reindex_h->query_until(qr/start/, q( $reindex_h->query_until(
qr/start/, q(
\echo start \echo start
DROP INDEX CONCURRENTLY idx; DROP INDEX CONCURRENTLY idx;
CREATE INDEX CONCURRENTLY idx ON tbl(i); CREATE INDEX CONCURRENTLY idx ON tbl(i);

View File

@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS)
/* /*
* Since we've checked that this redirect points to a line * Since we've checked that this redirect points to a line
* pointer between FirstOffsetNumber and maxoff, it should * pointer between FirstOffsetNumber and maxoff, it should now
* now be safe to fetch the referenced line pointer. We expect * be safe to fetch the referenced line pointer. We expect it
* it to be LP_NORMAL; if not, that's corruption. * to be LP_NORMAL; if not, that's corruption.
*/ */
rditem = PageGetItemId(ctx.page, rdoffnum); rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem)) if (!ItemIdIsUsed(rditem))
@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS)
{ {
/* /*
* We should not have set successor[ctx.offnum] to a value * We should not have set successor[ctx.offnum] to a value
* other than InvalidOffsetNumber unless that line pointer * other than InvalidOffsetNumber unless that line pointer is
* is LP_NORMAL. * LP_NORMAL.
*/ */
Assert(ItemIdIsNormal(next_lp)); Assert(ItemIdIsNormal(next_lp));
@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS)
} }
/* /*
* If the next line pointer is a redirect, or if it's a tuple * If the next line pointer is a redirect, or if it's a tuple but
* but the XMAX of this tuple doesn't match the XMIN of the next * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and * tuple, then the two aren't part of the same update chain and
* there is nothing more to do. * there is nothing more to do.
*/ */
@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS)
} }
/* /*
* This tuple and the tuple to which it points seem to be part * This tuple and the tuple to which it points seem to be part of
* of an update chain. * an update chain.
*/ */
predecessor[nextoffnum] = ctx.offnum; predecessor[nextoffnum] = ctx.offnum;
@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS)
} }
/* /*
* If the current tuple's xmin is aborted but the successor tuple's * If the current tuple's xmin is aborted but the successor
* xmin is in-progress or committed, that's corruption. * tuple's xmin is in-progress or committed, that's corruption.
*/ */
if (xmin_commit_status_ok[ctx.offnum] && if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED && xmin_commit_status[ctx.offnum] == XID_ABORTED &&
@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
diff = (int32) (ctx->next_xid - xid); diff = (int32) (ctx->next_xid - xid);
/* /*
* In cases of corruption we might see a 32bit xid that is before epoch * In cases of corruption we might see a 32bit xid that is before epoch 0.
* 0. We can't represent that as a 64bit xid, due to 64bit xids being * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's * unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use * no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd * FirstNormalFullTransactionId in that case, as a freshly initdb'd

View File

@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state)
MemoryContext basic_archive_context; MemoryContext basic_archive_context;
/* /*
* If we didn't get to storing the pointer to our allocated state, we don't * If we didn't get to storing the pointer to our allocated state, we
* have anything to clean up. * don't have anything to clean up.
*/ */
if (data == NULL) if (data == NULL)
return; return;

View File

@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
/* /*
* Should never get called when the insert is being performed on a table * Should never get called when the insert is being performed on a table
* that is also among the target relations of an UPDATE operation, * that is also among the target relations of an UPDATE operation, because
* because postgresBeginForeignInsert() currently rejects such insert * postgresBeginForeignInsert() currently rejects such insert attempts.
* attempts.
*/ */
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL); Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
@ -5173,9 +5172,9 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
&can_tablesample); &can_tablesample);
/* /*
* Make sure we're not choosing TABLESAMPLE when the remote relation does * Make sure we're not choosing TABLESAMPLE when the remote relation
* not support that. But only do this for "auto" - if the user explicitly * does not support that. But only do this for "auto" - if the user
* requested BERNOULLI/SYSTEM, it's better to fail. * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/ */
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO)) if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM; method = ANALYZE_SAMPLE_RANDOM;
@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
else else
{ {
/* /*
* All supported sampling methods require sampling rate, * All supported sampling methods require sampling rate, not
* not target rows directly, so we calculate that using * target rows directly, so we calculate that using the remote
* the remote reltuples value. That's imperfect, because * reltuples value. That's imperfect, because it might be off a
* it might be off a good deal, but that's not something * good deal, but that's not something we can (or should) address
* we can (or should) address here. * here.
* *
* If reltuples is too low (i.e. when table grew), we'll * If reltuples is too low (i.e. when table grew), we'll end up
* end up sampling more rows - but then we'll apply the * sampling more rows - but then we'll apply the local sampling,
* local sampling, so we get the expected sample size. * so we get the expected sample size. This is the same outcome as
* This is the same outcome as without remote sampling. * without remote sampling.
* *
* If reltuples is too high (e.g. after bulk DELETE), we * If reltuples is too high (e.g. after bulk DELETE), we will end
* will end up sampling too few rows. * up sampling too few rows.
* *
* We can't really do much better here - we could try * We can't really do much better here - we could try sampling a
* sampling a bit more rows, but we don't know how off * bit more rows, but we don't know how off the reltuples value is
* the reltuples value is so how much is "a bit more"? * so how much is "a bit more"?
* *
* Furthermore, the targrows value for partitions is * Furthermore, the targrows value for partitions is determined
* determined based on table size (relpages), which can * based on table size (relpages), which can be off in different
* be off in different ways too. Adjusting the sampling * ways too. Adjusting the sampling rate here might make the issue
* rate here might make the issue worse. * worse.
*/ */
sample_frac = targrows / reltuples; sample_frac = targrows / reltuples;
/* /*
* We should never get sampling rate outside the valid range * We should never get sampling rate outside the valid range
* (between 0.0 and 1.0), because those cases should be covered * (between 0.0 and 1.0), because those cases should be covered by
* by the previous branch that sets ANALYZE_SAMPLE_OFF. * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/ */
Assert(sample_frac >= 0.0 && sample_frac <= 1.0); Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
} }

View File

@ -700,8 +700,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
} }
/* /*
* If we found a scan key eliminating the range, no need to * If we found a scan key eliminating the range, no need
* check additional ones. * to check additional ones.
*/ */
if (!addrange) if (!addrange)
break; break;
@ -1801,8 +1801,8 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
bval = &dtup->bt_columns[keyno]; bval = &dtup->bt_columns[keyno];
/* /*
* Does the range have actual NULL values? Either of the flags can * Does the range have actual NULL values? Either of the flags can be
* be set, but we ignore the state before adding first row. * set, but we ignore the state before adding first row.
* *
* We have to remember this, because we'll modify the flags and we * We have to remember this, because we'll modify the flags and we
* need to know if the range started as empty. * need to know if the range started as empty.
@ -1842,12 +1842,12 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/* /*
* If the range was had actual NULL values (i.e. did not start empty), * If the range was had actual NULL values (i.e. did not start empty),
* make sure we don't forget about the NULL values. Either the allnulls * make sure we don't forget about the NULL values. Either the
* flag is still set to true, or (if the opclass cleared it) we need to * allnulls flag is still set to true, or (if the opclass cleared it)
* set hasnulls=true. * we need to set hasnulls=true.
* *
* XXX This can only happen when the opclass modified the tuple, so the * XXX This can only happen when the opclass modified the tuple, so
* modified flag should be set. * the modified flag should be set.
*/ */
if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls)) if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
{ {
@ -1859,9 +1859,9 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
/* /*
* After updating summaries for all the keys, mark it as not empty. * After updating summaries for all the keys, mark it as not empty.
* *
* If we're actually changing the flag value (i.e. tuple started as empty), * If we're actually changing the flag value (i.e. tuple started as
* we should have modified the tuple. So we should not see empty range that * empty), we should have modified the tuple. So we should not see empty
* was not modified. * range that was not modified.
*/ */
Assert(!dtup->bt_empty_range || modified); Assert(!dtup->bt_empty_range || modified);
dtup->bt_empty_range = false; dtup->bt_empty_range = false;

View File

@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS)
} }
else else
{ {
Size bsize, rsize; Size bsize,
rsize;
char *buf; char *buf;
const char *keydata = VARDATA_ANY(key); const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key); size_t keylen = VARSIZE_ANY_EXHDR(key);
@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS)
/* /*
* In principle, there's no reason to include the terminating NUL * In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior * character in the hash, but it was done before and the behavior must
* must be preserved. * be preserved.
*/ */
result = hash_any((uint8_t *) buf, bsize + 1); result = hash_any((uint8_t *) buf, bsize + 1);
@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS)
} }
else else
{ {
Size bsize, rsize; Size bsize,
rsize;
char *buf; char *buf;
const char *keydata = VARDATA_ANY(key); const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key); size_t keylen = VARSIZE_ANY_EXHDR(key);
@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS)
/* /*
* In principle, there's no reason to include the terminating NUL * In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior * character in the hash, but it was done before and the behavior must
* must be preserved. * be preserved.
*/ */
result = hash_any_extended((uint8_t *) buf, bsize + 1, result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1)); PG_GETARG_INT64(1));

View File

@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
* Note: heap_update returns the tid (location) of the new tuple in the * Note: heap_update returns the tid (location) of the new tuple in the
* t_self field. * t_self field.
* *
* If the update is not HOT, we must update all indexes. If the update * If the update is not HOT, we must update all indexes. If the update is
* is HOT, it could be that we updated summarized columns, so we either * HOT, it could be that we updated summarized columns, so we either
* update only summarized indexes, or none at all. * update only summarized indexes, or none at all.
*/ */
if (result != TM_Ok) if (result != TM_Ok)

View File

@ -389,6 +389,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED); Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
Assert(params->truncate != VACOPTVALUE_UNSPECIFIED && Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
params->truncate != VACOPTVALUE_AUTO); params->truncate != VACOPTVALUE_AUTO);
/* /*
* While VacuumFailSafeActive is reset to false before calling this, we * While VacuumFailSafeActive is reset to false before calling this, we
* still need to reset it here due to recursive calls. * still need to reset it here due to recursive calls.
@ -1813,12 +1814,12 @@ retry:
{ {
/* /*
* We have no freeze plans to execute, so there's no added cost * We have no freeze plans to execute, so there's no added cost
* from following the freeze path. That's why it was chosen. * from following the freeze path. That's why it was chosen. This
* This is important in the case where the page only contains * is important in the case where the page only contains totally
* totally frozen tuples at this point (perhaps only following * frozen tuples at this point (perhaps only following pruning).
* pruning). Such pages can be marked all-frozen in the VM by our * Such pages can be marked all-frozen in the VM by our caller,
* caller, even though none of its tuples were newly frozen here * even though none of its tuples were newly frozen here (note
* (note that the "no freeze" path never sets pages all-frozen). * that the "no freeze" path never sets pages all-frozen).
* *
* We never increment the frozen_pages instrumentation counter * We never increment the frozen_pages instrumentation counter
* here, since it only counts pages with newly frozen tuples * here, since it only counts pages with newly frozen tuples

View File

@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace); shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/* /*
* Serialize the transaction snapshot if the transaction * Serialize the transaction snapshot if the transaction isolation
* isolation level uses a transaction snapshot. * level uses a transaction snapshot.
*/ */
if (IsolationUsesXactSnapshot()) if (IsolationUsesXactSnapshot())
{ {
@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg)
RestoreClientConnectionInfo(clientconninfospace); RestoreClientConnectionInfo(clientconninfospace);
/* /*
* Initialize SystemUser now that MyClientConnectionInfo is restored. * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
* Also ensure that auth_method is actually valid, aka authn_id is not NULL. * ensure that auth_method is actually valid, aka authn_id is not NULL.
*/ */
if (MyClientConnectionInfo.authn_id) if (MyClientConnectionInfo.authn_id)
InitializeSystemUser(MyClientConnectionInfo.authn_id, InitializeSystemUser(MyClientConnectionInfo.authn_id,

View File

@ -3152,10 +3152,9 @@ CommitTransactionCommand(void)
break; break;
/* /*
* The user issued a SAVEPOINT inside a transaction block. * The user issued a SAVEPOINT inside a transaction block. Start a
* Start a subtransaction. (DefineSavepoint already did * subtransaction. (DefineSavepoint already did PushTransaction,
* PushTransaction, so as to have someplace to put the SUBBEGIN * so as to have someplace to put the SUBBEGIN state.)
* state.)
*/ */
case TBLOCK_SUBBEGIN: case TBLOCK_SUBBEGIN:
StartSubTransaction(); StartSubTransaction();

View File

@ -5460,8 +5460,8 @@ StartupXLOG(void)
missingContrecPtr = endOfRecoveryInfo->missingContrecPtr; missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
/* /*
* Reset ps status display, so as no information related to recovery * Reset ps status display, so as no information related to recovery shows
* shows up. * up.
*/ */
set_ps_display(""); set_ps_display("");
@ -5596,9 +5596,9 @@ StartupXLOG(void)
if (!XLogRecPtrIsInvalid(missingContrecPtr)) if (!XLogRecPtrIsInvalid(missingContrecPtr))
{ {
/* /*
* We should only have a missingContrecPtr if we're not switching to * We should only have a missingContrecPtr if we're not switching to a
* a new timeline. When a timeline switch occurs, WAL is copied from * new timeline. When a timeline switch occurs, WAL is copied from the
* the old timeline to the new only up to the end of the last complete * old timeline to the new only up to the end of the last complete
* record, so there can't be an incomplete WAL record that we need to * record, so there can't be an incomplete WAL record that we need to
* disregard. * disregard.
*/ */

View File

@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
* *
* XLogReader machinery is only able to handle records up to a certain * XLogReader machinery is only able to handle records up to a certain
* size (ignoring machine resource limitations), so make sure that we will * size (ignoring machine resource limitations), so make sure that we will
* not emit records larger than the sizes advertised to be supported. * not emit records larger than the sizes advertised to be supported. This
* This cap is based on DecodeXLogRecordRequiredSpace(). * cap is based on DecodeXLogRecordRequiredSpace().
*/ */
if (total_len >= XLogRecordMaxSize) if (total_len >= XLogRecordMaxSize)
ereport(ERROR, ereport(ERROR,

View File

@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename,
* *
* There's no guarantee that this will actually * There's no guarantee that this will actually
* happen, though: the torn write could take an * happen, though: the torn write could take an
* arbitrarily long time to complete. Retrying multiple * arbitrarily long time to complete. Retrying
* times wouldn't fix this problem, either, though * multiple times wouldn't fix this problem, either,
* it would reduce the chances of it happening in * though it would reduce the chances of it happening
* practice. The only real fix here seems to be to * in practice. The only real fix here seems to be to
* have some kind of interlock that allows us to wait * have some kind of interlock that allows us to wait
* until we can be certain that no write to the block * until we can be certain that no write to the block
* is in progress. Since we don't have any such thing * is in progress. Since we don't have any such thing

View File

@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tupdesc = CreateTemplateTupleDesc(2); tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0); TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
/* /*
* int8 may seem like a surprising data type for this, but in theory int4 * int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned. * would not be wide enough for this, as TimeLineID is unsigned.

View File

@ -243,8 +243,8 @@ sub ParseHeader
# BKI_LOOKUP implicitly makes an FK reference # BKI_LOOKUP implicitly makes an FK reference
push @{ $catalog{foreign_keys} }, push @{ $catalog{foreign_keys} },
{ {
is_array => is_array => (
($atttype eq 'oidvector' || $atttype eq '_oid') $atttype eq 'oidvector' || $atttype eq '_oid')
? 1 ? 1
: 0, : 0,
is_opt => $column{lookup_opt}, is_opt => $column{lookup_opt},

View File

@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)); result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
/* /*
* Check if ACL_MAINTAIN is being checked and, if so, and not already set as * Check if ACL_MAINTAIN is being checked and, if so, and not already set
* part of the result, then check if the user is a member of the * as part of the result, then check if the user is a member of the
* pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH * pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
* MATERIALIZED VIEW, and REINDEX on all relations. * MATERIALIZED VIEW, and REINDEX on all relations.
*/ */

View File

@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple,
#endif /* USE_ASSERT_CHECKING */ #endif /* USE_ASSERT_CHECKING */
/* /*
* Skip insertions into non-summarizing indexes if we only need * Skip insertions into non-summarizing indexes if we only need to
* to update summarizing indexes. * update summarizing indexes.
*/ */
if (onlySummarized && !indexInfo->ii_Summarizing) if (onlySummarized && !indexInfo->ii_Summarizing)
continue; continue;

View File

@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
/* FALLTHROUGH */ /* FALLTHROUGH */
case SHARED_DEPENDENCY_OWNER: case SHARED_DEPENDENCY_OWNER:
/* /*
* Save it for deletion below, if it's a local object or a * Save it for deletion below, if it's a local object or a
* role grant. Other shared objects, such as databases, * role grant. Other shared objects, such as databases,

View File

@ -487,6 +487,7 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
/* retrieve from pg_database */ /* retrieve from pg_database */
HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId)); HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
if (!HeapTupleIsValid(dbtup)) if (!HeapTupleIsValid(dbtup))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT), (errcode(ERRCODE_UNDEFINED_OBJECT),
@ -507,6 +508,7 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
/* retrieve from pg_collation */ /* retrieve from pg_collation */
HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
if (!HeapTupleIsValid(colltp)) if (!HeapTupleIsValid(colltp))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT), (errcode(ERRCODE_UNDEFINED_OBJECT),
@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid,
Oid collid; Oid collid;
/* /*
* Some systems have locale names that don't consist entirely of * Some systems have locale names that don't consist entirely of ASCII
* ASCII letters (such as "bokm&aring;l" or "fran&ccedil;ais"). * letters (such as "bokm&aring;l" or "fran&ccedil;ais"). This is pretty
* This is pretty silly, since we need the locale itself to * silly, since we need the locale itself to interpret the non-ASCII
* interpret the non-ASCII characters. We can't do much with * characters. We can't do much with those, so we filter them out.
* those, so we filter them out.
*/ */
if (!pg_is_ascii(locale)) if (!pg_is_ascii(locale))
{ {
@ -687,13 +688,12 @@ create_collation_from_locale(const char *locale, int nspid,
(*nvalidp)++; (*nvalidp)++;
/* /*
* Create a collation named the same as the locale, but quietly * Create a collation named the same as the locale, but quietly doing
* doing nothing if it already exists. This is the behavior we * nothing if it already exists. This is the behavior we need even at
* need even at initdb time, because some versions of "locale -a" * initdb time, because some versions of "locale -a" can report the same
* can report the same locale name more than once. And it's * locale name more than once. And it's convenient for later import runs,
* convenient for later import runs, too, since you just about * too, since you just about always want to add on new locales without a
* always want to add on new locales without a lot of chatter * lot of chatter about existing ones.
* about existing ones.
*/ */
collid = CollationCreate(locale, nspid, GetUserId(), collid = CollationCreate(locale, nspid, GetUserId(),
COLLPROVIDER_LIBC, true, enc, COLLPROVIDER_LIBC, true, enc,
@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
param.nvalidp = &nvalid; param.nvalidp = &nvalid;
/* /*
* Enumerate the locales that are either installed on or supported * Enumerate the locales that are either installed on or supported by
* by the OS. * the OS.
*/ */
if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL, if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
(LPARAM) &param, NULL)) (LPARAM) &param, NULL))

View File

@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
* If we're going to be reading data for the to-be-created database into * If we're going to be reading data for the to-be-created database into
* shared_buffers, take a lock on it. Nobody should know that this * shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an * database exists yet, but it's good to maintain the invariant that an
* AccessExclusiveLock on the database is sufficient to drop all * AccessExclusiveLock on the database is sufficient to drop all of its
* of its buffers without worrying about more being read later. * buffers without worrying about more being read later.
* *
* Note that we need to do this before entering the * Note that we need to do this before entering the
* PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback

View File

@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
case OBJECT_TABLE: case OBJECT_TABLE:
case OBJECT_TABLESPACE: case OBJECT_TABLESPACE:
case OBJECT_VIEW: case OBJECT_VIEW:
/* /*
* These are handled elsewhere, so if someone gets here the code * These are handled elsewhere, so if someone gets here the code
* is probably wrong or should be revisited. * is probably wrong or should be revisited.

View File

@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
/* /*
* The table can be reindexed if the user has been granted MAINTAIN on * The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a * the table or one of its partition ancestors or the user is a
* superuser, the table owner, or the database/schema owner (but in the * superuser, the table owner, or the database/schema owner (but in
* latter case, only if it's not a shared relation). pg_class_aclcheck * the latter case, only if it's not a shared relation).
* includes the superuser case, and depending on objectKind we already * pg_class_aclcheck includes the superuser case, and depending on
* know that the user has permission to run REINDEX on this database or * objectKind we already know that the user has permission to run
* schema per the permission checks at the beginning of this routine. * REINDEX on this database or schema per the permission checks at the
* beginning of this routine.
*/ */
if (classtuple->relisshared && if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK && pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&

View File

@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
/* /*
* We don't want to allow unprivileged users to be able to trigger attempts * We don't want to allow unprivileged users to be able to trigger
* to access arbitrary network destinations, so require the user to have * attempts to access arbitrary network destinations, so require the user
* been specifically authorized to create subscriptions. * to have been specifically authorized to create subscriptions.
*/ */
if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION)) if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
ereport(ERROR, ereport(ERROR,
@ -1837,8 +1837,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
* current owner must have CREATE on database * current owner must have CREATE on database
* *
* This is consistent with how ALTER SCHEMA ... OWNER TO works, but some * This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
* other object types behave differently (e.g. you can't give a table to * other object types behave differently (e.g. you can't give a table to a
* a user who lacks CREATE privileges on a schema). * user who lacks CREATE privileges on a schema).
*/ */
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
GetUserId(), ACL_CREATE); GetUserId(), ACL_CREATE);

View File

@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
* *
* The grantor of record for this implicit grant is the bootstrap * The grantor of record for this implicit grant is the bootstrap
* superuser, which means that the CREATEROLE user cannot revoke the * superuser, which means that the CREATEROLE user cannot revoke the
* grant. They can however grant the created role back to themselves * grant. They can however grant the created role back to themselves with
* with different options, since they enjoy ADMIN OPTION on it. * different options, since they enjoy ADMIN OPTION on it.
*/ */
if (!superuser()) if (!superuser())
{ {
@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
BOOTSTRAP_SUPERUSERID, &poptself); BOOTSTRAP_SUPERUSERID, &poptself);
/* /*
* We must make the implicit grant visible to the code below, else * We must make the implicit grant visible to the code below, else the
* the additional grants will fail. * additional grants will fail.
*/ */
CommandCounterIncrement(); CommandCounterIncrement();
@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
* Add the specified members to this new role. adminmembers get the admin * Add the specified members to this new role. adminmembers get the admin
* option, rolemembers don't. * option, rolemembers don't.
* *
* NB: No permissions check is required here. If you have enough rights * NB: No permissions check is required here. If you have enough rights to
* to create a role, you can add any members you like. * create a role, you can add any members you like.
*/ */
AddRoleMems(currentUserId, stmt->role, roleid, AddRoleMems(currentUserId, stmt->role, roleid,
rolemembers, roleSpecsToIds(rolemembers), rolemembers, roleSpecsToIds(rolemembers),
@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
shdepLockAndCheckObject(AuthIdRelationId, roleid); shdepLockAndCheckObject(AuthIdRelationId, roleid);
/* /*
* To mess with a superuser you gotta be superuser; otherwise you * To mess with a superuser you gotta be superuser; otherwise you need
* need CREATEROLE plus admin option on the target role; unless you're * CREATEROLE plus admin option on the target role; unless you're just
* just trying to change your own settings * trying to change your own settings
*/ */
if (roleform->rolsuper) if (roleform->rolsuper)
{ {
@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
/* /*
* Step through all of the granted roles and add, update, or remove * Step through all of the granted roles and add, update, or remove
* entries in pg_auth_members as appropriate. If stmt->is_grant is true, * entries in pg_auth_members as appropriate. If stmt->is_grant is true,
* we are adding new grants or, if they already exist, updating options * we are adding new grants or, if they already exist, updating options on
* on those grants. If stmt->is_grant is false, we are revoking grants or * those grants. If stmt->is_grant is false, we are revoking grants or
* removing options from them. * removing options from them.
*/ */
foreach(item, stmt->granted_roles) foreach(item, stmt->granted_roles)
@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
ObjectIdGetDatum(grantorId)); ObjectIdGetDatum(grantorId));
/* /*
* If we found a tuple, update it with new option values, unless * If we found a tuple, update it with new option values, unless there
* there are no changes, in which case issue a WARNING. * are no changes, in which case issue a WARNING.
* *
* If we didn't find a tuple, just insert one. * If we didn't find a tuple, just insert one.
*/ */
@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
/* /*
* If popt.specified == 0, we're revoking the grant entirely; otherwise, * If popt.specified == 0, we're revoking the grant entirely; otherwise,
* we expect just one bit to be set, and we're revoking the corresponding * we expect just one bit to be set, and we're revoking the corresponding
* option. As of this writing, there's no syntax that would allow for * option. As of this writing, there's no syntax that would allow for an
* an attempt to revoke multiple options at once, and the logic below * attempt to revoke multiple options at once, and the logic below
* wouldn't work properly if such syntax were added, so assert that our * wouldn't work properly if such syntax were added, so assert that our
* caller isn't trying to do that. * caller isn't trying to do that.
*/ */

View File

@ -354,8 +354,8 @@ ExecInsertIndexTuples(ResultRelInfo *resultRelInfo,
continue; continue;
/* /*
* Skip processing of non-summarizing indexes if we only * Skip processing of non-summarizing indexes if we only update
* update summarizing indexes * summarizing indexes
*/ */
if (onlySummarizing && !indexInfo->ii_Summarizing) if (onlySummarizing && !indexInfo->ii_Summarizing)
continue; continue;

View File

@ -1330,18 +1330,18 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
BufFile *file = *fileptr; BufFile *file = *fileptr;
/* /*
* The batch file is lazily created. If this is the first tuple * The batch file is lazily created. If this is the first tuple written to
* written to this batch, the batch file is created and its buffer is * this batch, the batch file is created and its buffer is allocated in
* allocated in the spillCxt context, NOT in the batchCxt. * the spillCxt context, NOT in the batchCxt.
* *
* During the build phase, buffered files are created for inner * During the build phase, buffered files are created for inner batches.
* batches. Each batch's buffered file is closed (and its buffer freed) * Each batch's buffered file is closed (and its buffer freed) after the
* after the batch is loaded into memory during the outer side scan. * batch is loaded into memory during the outer side scan. Therefore, it
* Therefore, it is necessary to allocate the batch file buffer in a * is necessary to allocate the batch file buffer in a memory context
* memory context which outlives the batch itself. * which outlives the batch itself.
* *
* Also, we use spillCxt instead of hashCxt for a better accounting of * Also, we use spillCxt instead of hashCxt for a better accounting of the
* the spilling memory consumption. * spilling memory consumption.
*/ */
if (file == NULL) if (file == NULL)
{ {

View File

@ -799,9 +799,9 @@ llvm_session_initialize(void)
LLVMInitializeNativeAsmParser(); LLVMInitializeNativeAsmParser();
/* /*
* When targeting an LLVM version with opaque pointers enabled by * When targeting an LLVM version with opaque pointers enabled by default,
* default, turn them off for the context we build our code in. We don't * turn them off for the context we build our code in. We don't need to
* need to do so for other contexts (e.g. llvm_ts_context). Once the IR is * do so for other contexts (e.g. llvm_ts_context). Once the IR is
* generated, it carries the necessary information. * generated, it carries the necessary information.
*/ */
#if LLVM_VERSION_MAJOR > 14 #if LLVM_VERSION_MAJOR > 14

View File

@ -2127,8 +2127,7 @@ llvm_compile_expr(ExprState *state)
/* /*
* pergroup = &aggstate->all_pergroups * pergroup = &aggstate->all_pergroups
* [op->d.agg_trans.setoff] * [op->d.agg_trans.setoff] [op->d.agg_trans.transno];
* [op->d.agg_trans.transno];
*/ */
v_allpergroupsp = v_allpergroupsp =
l_load_struct_gep(b, v_aggstatep, l_load_struct_gep(b, v_aggstatep,

View File

@ -527,8 +527,8 @@ secure_open_gssapi(Port *port)
/* /*
* Use the configured keytab, if there is one. As we now require MIT * Use the configured keytab, if there is one. As we now require MIT
* Kerberos, we might consider using the credential store extensions in the * Kerberos, we might consider using the credential store extensions in
* future instead of the environment variable. * the future instead of the environment variable.
*/ */
if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0') if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
{ {

View File

@ -1104,8 +1104,8 @@ prepare_cert_name(char *name)
if (namelen > MAXLEN) if (namelen > MAXLEN)
{ {
/* /*
* Keep the end of the name, not the beginning, since the most specific * Keep the end of the name, not the beginning, since the most
* field is likely to give users the most information. * specific field is likely to give users the most information.
*/ */
truncated = name + namelen - MAXLEN; truncated = name + namelen - MAXLEN;
truncated[0] = truncated[1] = truncated[2] = '.'; truncated[0] = truncated[1] = truncated[2] = '.';
@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx)
/* /*
* Get the Subject and Issuer for logging, but don't let maliciously * Get the Subject and Issuer for logging, but don't let maliciously
* huge certs flood the logs, and don't reflect non-ASCII bytes into it * huge certs flood the logs, and don't reflect non-ASCII bytes into
* either. * it either.
*/ */
subject = X509_NAME_to_cstring(X509_get_subject_name(cert)); subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
sub_prepared = prepare_cert_name(subject); sub_prepared = prepare_cert_name(subject);

View File

@ -2693,8 +2693,9 @@ load_hba(void)
if (!ok) if (!ok)
{ {
/* /*
* File contained one or more errors, so bail out. MemoryContextDelete * File contained one or more errors, so bail out.
* is enough to clean up everything, including regexes. * MemoryContextDelete is enough to clean up everything, including
* regexes.
*/ */
MemoryContextDelete(hbacxt); MemoryContextDelete(hbacxt);
return false; return false;
@ -3056,8 +3057,9 @@ load_ident(void)
if (!ok) if (!ok)
{ {
/* /*
* File contained one or more errors, so bail out. MemoryContextDelete * File contained one or more errors, so bail out.
* is enough to clean up everything, including regexes. * MemoryContextDelete is enough to clean up everything, including
* regexes.
*/ */
MemoryContextDelete(ident_context); MemoryContextDelete(ident_context);
return false; return false;

View File

@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt)
/* /*
* Set up the MERGE target table. The target table is added to the * Set up the MERGE target table. The target table is added to the
* namespace below and to joinlist in transform_MERGE_to_join, so don't * namespace below and to joinlist in transform_MERGE_to_join, so don't do
* do it here. * it here.
*/ */
qry->resultRelation = setTargetTable(pstate, stmt->relation, qry->resultRelation = setTargetTable(pstate, stmt->relation,
stmt->relation->inh, stmt->relation->inh,

View File

@ -2340,9 +2340,9 @@ merge_default_partitions(PartitionMap *outer_map,
/* /*
* The default partitions have to be joined with each other, so merge * The default partitions have to be joined with each other, so merge
* them. Note that each of the default partitions isn't merged yet * them. Note that each of the default partitions isn't merged yet
* (see, process_outer_partition()/process_inner_partition()), so * (see, process_outer_partition()/process_inner_partition()), so they
* they should be merged successfully. The merged partition will act * should be merged successfully. The merged partition will act as
* as the default partition of the join relation. * the default partition of the join relation.
*/ */
Assert(outer_merged_index == -1); Assert(outer_merged_index == -1);
Assert(inner_merged_index == -1); Assert(inner_merged_index == -1);

View File

@ -58,8 +58,8 @@ fork_process(void)
/* /*
* We start postmaster children with signals blocked. This allows them to * We start postmaster children with signals blocked. This allows them to
* install their own handlers before unblocking, to avoid races where they * install their own handlers before unblocking, to avoid races where they
* might run the postmaster's handler and miss an important control signal. * might run the postmaster's handler and miss an important control
* With more analysis this could potentially be relaxed. * signal. With more analysis this could potentially be relaxed.
*/ */
sigprocmask(SIG_SETMASK, &BlockSig, &save_mask); sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
result = fork(); result = fork();

View File

@ -759,6 +759,7 @@ lexescape(struct vars *v)
RETV(PLAIN, c); RETV(PLAIN, c);
break; break;
default: default:
/* /*
* Throw an error for unrecognized ASCII alpha escape sequences, * Throw an error for unrecognized ASCII alpha escape sequences,
* which reserves them for future use if needed. * which reserves them for future use if needed.

View File

@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* invalidated when this WAL record is replayed; and further, * invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but * slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical * all these operations are not synchronized, so a logical
* slot may creep in while the wal_level is being * slot may creep in while the wal_level is being reduced.
* reduced. Hence this extra check. * Hence this extra check.
*/ */
if (xlrec->wal_level < WAL_LEVEL_LOGICAL) if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{ {

View File

@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin,
MemoryContext old_context; MemoryContext old_context;
/* /*
* On a standby, this check is also required while creating the * On a standby, this check is also required while creating the slot.
* slot. Check the comments in the function. * Check the comments in the function.
*/ */
CheckLogicalDecodingRequirements(); CheckLogicalDecodingRequirements();

View File

@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
ReorderBufferTXN *txn; ReorderBufferTXN *txn;
/* /*
* Bail out if logical_replication_mode is buffered and we haven't exceeded * Bail out if logical_replication_mode is buffered and we haven't
* the memory limit. * exceeded the memory limit.
*/ */
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED && if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L) rb->size < logical_decoding_work_mem * 1024L)
@ -4010,10 +4010,10 @@ ReorderBufferStreamTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
* After that we need to reuse the snapshot from the previous run. * After that we need to reuse the snapshot from the previous run.
* *
* Unlike DecodeCommit which adds xids of all the subtransactions in * Unlike DecodeCommit which adds xids of all the subtransactions in
* snapshot's xip array via SnapBuildCommitTxn, we can't do that here * snapshot's xip array via SnapBuildCommitTxn, we can't do that here but
* but we do add them to subxip array instead via ReorderBufferCopySnap. * we do add them to subxip array instead via ReorderBufferCopySnap. This
* This allows the catalog changes made in subtransactions decoded till * allows the catalog changes made in subtransactions decoded till now to
* now to be visible. * be visible.
*/ */
if (txn->snapshot_now == NULL) if (txn->snapshot_now == NULL)
{ {

View File

@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
*/ */
/* /*
* xl_running_xacts record is older than what we can use, we might not have * xl_running_xacts record is older than what we can use, we might not
* all necessary catalog rows anymore. * have all necessary catalog rows anymore.
*/ */
if (TransactionIdIsNormal(builder->initial_xmin_horizon) && if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid, NormalTransactionIdPrecedes(running->oldestRunningXid,

View File

@ -5080,10 +5080,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
} }
/* /*
* If we are processing this transaction using a parallel apply worker then * If we are processing this transaction using a parallel apply worker
* either we send the changes to the parallel worker or if the worker is busy * then either we send the changes to the parallel worker or if the worker
* then serialize the changes to the file which will later be processed by * is busy then serialize the changes to the file which will later be
* the parallel worker. * processed by the parallel worker.
*/ */
*winfo = pa_find_worker(xid); *winfo = pa_find_worker(xid);
@ -5097,9 +5097,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
} }
/* /*
* If there is no parallel worker involved to process this transaction then * If there is no parallel worker involved to process this transaction
* we either directly apply the change or serialize it to a file which will * then we either directly apply the change or serialize it to a file
* later be applied when the transaction finish message is processed. * which will later be applied when the transaction finish message is
* processed.
*/ */
else if (in_streamed_transaction) else if (in_streamed_transaction)
{ {

View File

@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications,
* are multiple lists (one for each operation) to which row filters will * are multiple lists (one for each operation) to which row filters will
* be appended. * be appended.
* *
* FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
* filter expression" so it takes precedence. * expression" so it takes precedence.
*/ */
foreach(lc, publications) foreach(lc, publications)
{ {

View File

@ -48,8 +48,7 @@ our @languages = qw(
our %ascii_languages = ( our %ascii_languages = (
'hindi' => 'english', 'hindi' => 'english',
'russian' => 'english', 'russian' => 'english',);
);
GetOptions( GetOptions(
'depfile' => \$depfile, 'depfile' => \$depfile,

View File

@ -98,8 +98,7 @@ struct BufFile
/* /*
* XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid * XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid
* wasting per-file alignment padding when some users create many * wasting per-file alignment padding when some users create many files.
* files.
*/ */
PGAlignedBlock buffer; PGAlignedBlock buffer;
}; };

View File

@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size)
/* /*
* Block all blockable signals, except SIGQUIT. posix_fallocate() can run * Block all blockable signals, except SIGQUIT. posix_fallocate() can run
* for quite a long time, and is an all-or-nothing operation. If we * for quite a long time, and is an all-or-nothing operation. If we
* allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery * allowed SIGUSR1 to interrupt us repeatedly (for example, due to
* conflicts), the retry loop might never succeed. * recovery conflicts), the retry loop might never succeed.
*/ */
if (IsUnderPostmaster) if (IsUnderPostmaster)
sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask); sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask);
pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE); pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE);
#if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__) #if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
/* /*
* On Linux, a shm_open fd is backed by a tmpfs file. If we were to use * On Linux, a shm_open fd is backed by a tmpfs file. If we were to use
* ftruncate, the file would contain a hole. Accessing memory backed by a * ftruncate, the file would contain a hole. Accessing memory backed by a
@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size)
* SIGBUS later. * SIGBUS later.
* *
* We still use a traditional EINTR retry loop to handle SIGCONT. * We still use a traditional EINTR retry loop to handle SIGCONT.
* posix_fallocate() doesn't restart automatically, and we don't want * posix_fallocate() doesn't restart automatically, and we don't want this
* this to fail if you attach a debugger. * to fail if you attach a debugger.
*/ */
do do
{ {
@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size)
} while (rc == EINTR); } while (rc == EINTR);
/* /*
* The caller expects errno to be set, but posix_fallocate() doesn't * The caller expects errno to be set, but posix_fallocate() doesn't set
* set it. Instead it returns error numbers directly. So set errno, * it. Instead it returns error numbers directly. So set errno, even
* even though we'll also return rc to indicate success or failure. * though we'll also return rc to indicate success or failure.
*/ */
errno = rc; errno = rc;
#else #else

View File

@ -12,8 +12,7 @@ my $output_path = '.';
my $lastlockidx = -1; my $lastlockidx = -1;
my $continue = "\n"; my $continue = "\n";
GetOptions( GetOptions('outdir:s' => \$output_path);
'outdir:s' => \$output_path);
open my $lwlocknames, '<', $ARGV[0] or die; open my $lwlocknames, '<', $ARGV[0] or die;
@ -71,7 +70,8 @@ printf $h "#define NUM_INDIVIDUAL_LWLOCKS %s\n", $lastlockidx + 1;
close $h; close $h;
close $c; close $c;
rename($htmp, "$output_path/lwlocknames.h") || die "rename: $htmp to $output_path/lwlocknames.h: $!"; rename($htmp, "$output_path/lwlocknames.h")
|| die "rename: $htmp to $output_path/lwlocknames.h: $!";
rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!"; rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!";
close $lwlocknames; close $lwlocknames;

View File

@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
dclist_foreach(proc_iter, waitQueue) dclist_foreach(proc_iter, waitQueue)
{ {
PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur); PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
if (queued_proc == blocked_proc) if (queued_proc == blocked_proc)
break; break;
data->waiter_pids[data->npids++] = queued_proc->pid; data->waiter_pids[data->npids++] = queued_proc->pid;

View File

@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock)
LWLockWaitListLock(lock); LWLockWaitListLock(lock);
/* /*
* Remove ourselves from the waitlist, unless we've already been * Remove ourselves from the waitlist, unless we've already been removed.
* removed. The removal happens with the wait list lock held, so there's * The removal happens with the wait list lock held, so there's no race in
* no race in this check. * this check.
*/ */
on_waitlist = MyProc->lwWaiting == LW_WS_WAITING; on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
if (on_waitlist) if (on_waitlist)

View File

@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
/* /*
* If we didn't find any possibly unsafe conflicts because every * If we didn't find any possibly unsafe conflicts because every
* uncommitted writable transaction turned out to be doomed, then we * uncommitted writable transaction turned out to be doomed, then we
* can "opt out" immediately. See comments above the earlier check for * can "opt out" immediately. See comments above the earlier check
* PredXact->WritableSxactCount == 0. * for PredXact->WritableSxactCount == 0.
*/ */
if (dlist_is_empty(&sxact->possibleUnsafeConflicts)) if (dlist_is_empty(&sxact->possibleUnsafeConflicts))
{ {
@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
* xmin and purge any transactions which finished before this transaction * xmin and purge any transactions which finished before this transaction
* was launched. * was launched.
* *
* For parallel queries in read-only transactions, it might run twice. * For parallel queries in read-only transactions, it might run twice. We
* We only release the reference on the first call. * only release the reference on the first call.
*/ */
needToClear = false; needToClear = false;
if ((partiallyReleasing || if ((partiallyReleasing ||

View File

@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum,
/* /*
* Even if we don't want to use fallocate, we can still extend a * Even if we don't want to use fallocate, we can still extend a
* bit more efficiently than writing each 8kB block individually. * bit more efficiently than writing each 8kB block individually.
* pg_pwrite_zeros() (via FileZero()) uses * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
* pg_pwritev_with_retry() to avoid multiple writes or needing a * to avoid multiple writes or needing a zeroed buffer for the
* zeroed buffer for the whole length of the extension. * whole length of the extension.
*/ */
ret = FileZero(v->mdfd_vfd, ret = FileZero(v->mdfd_vfd,
seekpos, (off_t) BLCKSZ * numblocks, seekpos, (off_t) BLCKSZ * numblocks,

View File

@ -214,6 +214,7 @@ $bmap{'f'} = 'false';
my @fmgr_builtin_oid_index; my @fmgr_builtin_oid_index;
my $last_builtin_oid = 0; my $last_builtin_oid = 0;
my $fmgr_count = 0; my $fmgr_count = 0;
foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr) foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
{ {
next if $s->{lang} ne 'internal'; next if $s->{lang} ne 'internal';

View File

@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p,
/* /*
* endptr points to the first character _after_ the sequence we recognized * endptr points to the first character _after_ the sequence we recognized
* as a valid floating point number. orig_string points to the original * as a valid floating point number. orig_string points to the original
* input * input string.
* string.
*/ */
/* skip leading whitespace */ /* skip leading whitespace */

View File

@ -1794,8 +1794,7 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2,
else else
#endif #endif
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p); result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
if (result == 2147483647) /* _NLSCMPERROR; missing from mingw if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
* headers */
ereport(ERROR, ereport(ERROR,
(errmsg("could not compare Unicode strings: %m"))); (errmsg("could not compare Unicode strings: %m")));
@ -1826,6 +1825,7 @@ pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale)
{ {
size_t len1 = strlen(arg1); size_t len1 = strlen(arg1);
size_t len2 = strlen(arg2); size_t len2 = strlen(arg2);
result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale); result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale);
} }
else else
@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len)
{ {
UErrorCode status = U_ZERO_ERROR; UErrorCode status = U_ZERO_ERROR;
int32_t ulen; int32_t ulen;
ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status); ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status);
if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR) if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
ereport(ERROR, ereport(ERROR,
@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen,
{ {
UErrorCode status = U_ZERO_ERROR; UErrorCode status = U_ZERO_ERROR;
int32_t ulen; int32_t ulen;
status = U_ZERO_ERROR; status = U_ZERO_ERROR;
ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status); ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status);
if (U_FAILURE(status)) if (U_FAILURE(status))
@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel)
return pstrdup("en-US-u-va-posix"); return pstrdup("en-US-u-va-posix");
/* /*
* A BCP47 language tag doesn't have a clearly-defined upper limit * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
* (cf. RFC5646 section 4.4). Additionally, in older ICU versions, * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the * uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop. * first call, necessitating a loop.
*/ */

View File

@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS)
} }
else else
{ {
Size bsize, rsize; Size bsize,
rsize;
char *buf; char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS)
/* /*
* In principle, there's no reason to include the terminating NUL * In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior * character in the hash, but it was done before and the behavior must
* must be preserved. * be preserved.
*/ */
result = hash_any((uint8_t *) buf, bsize + 1); result = hash_any((uint8_t *) buf, bsize + 1);
@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
} }
else else
{ {
Size bsize, rsize; Size bsize,
rsize;
char *buf; char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
/* /*
* In principle, there's no reason to include the terminating NUL * In principle, there's no reason to include the terminating NUL
* character in the hash, but it was done before and the behavior * character in the hash, but it was done before and the behavior must
* must be preserved. * be preserved.
*/ */
result = hash_any_extended((uint8_t *) buf, bsize + 1, result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1)); PG_GETARG_INT64(1));

View File

@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
memcpy(sss->buf1, authoritative_data, len); memcpy(sss->buf1, authoritative_data, len);
/* /*
* pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings.
* strings.
*/ */
sss->buf1[len] = '\0'; sss->buf1[len] = '\0';
sss->last_len1 = len; sss->last_len1 = len;

View File

@ -3769,8 +3769,8 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
/* /*
* During a binary upgrade, we use this code path to ensure that * During a binary upgrade, we use this code path to ensure that
* pg_largeobject and its index have the same relfilenumbers as in * pg_largeobject and its index have the same relfilenumbers as in the
* the old cluster. This is necessary because pg_upgrade treats * old cluster. This is necessary because pg_upgrade treats
* pg_largeobject like a user table, not a system table. It is however * pg_largeobject like a user table, not a system table. It is however
* possible that a table or index may need to end up with the same * possible that a table or index may need to end up with the same
* relfilenumber in the new cluster as what it had in the old cluster. * relfilenumber in the new cluster as what it had in the old cluster.
@ -5314,8 +5314,8 @@ restart:
* when the column value changes, thus require a separate * when the column value changes, thus require a separate
* attribute bitmapset. * attribute bitmapset.
* *
* Obviously, non-key columns couldn't be referenced by * Obviously, non-key columns couldn't be referenced by foreign
* foreign key or identity key. Hence we do not include them into * key or identity key. Hence we do not include them into
* uindexattrs, pkindexattrs and idindexattrs bitmaps. * uindexattrs, pkindexattrs and idindexattrs bitmaps.
*/ */
if (attrnum != 0) if (attrnum != 0)

View File

@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel)
/* /*
* Open the target file. * Open the target file.
* *
* Because Windows isn't happy about the idea of renaming over a file * Because Windows isn't happy about the idea of renaming over a file that
* that someone has open, we only open this file after acquiring the lock, * someone has open, we only open this file after acquiring the lock, and
* and for the same reason, we close it before releasing the lock. That * for the same reason, we close it before releasing the lock. That way,
* way, by the time write_relmap_file() acquires an exclusive lock, no * by the time write_relmap_file() acquires an exclusive lock, no one else
* one else will have it open. * will have it open.
*/ */
snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath, snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath,
RELMAPPER_FILENAME); RELMAPPER_FILENAME);

View File

@ -9,8 +9,7 @@ use Getopt::Long;
my $outfile = ''; my $outfile = '';
GetOptions( GetOptions('outfile=s' => \$outfile) or die "$0: wrong arguments";
'outfile=s' => \$outfile) or die "$0: wrong arguments";
open my $errcodes, '<', $ARGV[0] open my $errcodes, '<', $ARGV[0]
or die "$0: could not open input file '$ARGV[0]': $!\n"; or die "$0: could not open input file '$ARGV[0]': $!\n";

View File

@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid,
} }
/* /*
* The last few connection slots are reserved for superusers and roles with * The last few connection slots are reserved for superusers and roles
* privileges of pg_use_reserved_connections. Replication connections are * with privileges of pg_use_reserved_connections. Replication
* drawn from slots reserved with max_wal_senders and are not limited by * connections are drawn from slots reserved with max_wal_senders and are
* max_connections, superuser_reserved_connections, or * not limited by max_connections, superuser_reserved_connections, or
* reserved_connections. * reserved_connections.
* *
* Note: At this point, the new backend has already claimed a proc struct, * Note: At this point, the new backend has already claimed a proc struct,

View File

@ -67,9 +67,9 @@ SwitchToUntrustedUser(Oid userid, UserContext *context)
* This user can SET ROLE to the target user, but not the other way * This user can SET ROLE to the target user, but not the other way
* around, so protect ourselves against the target user by setting * around, so protect ourselves against the target user by setting
* SECURITY_RESTRICTED_OPERATION to prevent certain changes to the * SECURITY_RESTRICTED_OPERATION to prevent certain changes to the
* session state. Also set up a new GUC nest level, so that we can roll * session state. Also set up a new GUC nest level, so that we can
* back any GUC changes that may be made by code running as the target * roll back any GUC changes that may be made by code running as the
* user, inasmuch as they could be malicious. * target user, inasmuch as they could be malicious.
*/ */
sec_context |= SECURITY_RESTRICTED_OPERATION; sec_context |= SECURITY_RESTRICTED_OPERATION;
SetUserIdAndSecContext(userid, sec_context); SetUserIdAndSecContext(userid, sec_context);

View File

@ -607,8 +607,10 @@ sub print_radix_table
# Print the next line's worth of values. # Print the next line's worth of values.
# XXX pad to begin at a nice boundary # XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i; printf $out " /* %02x */ ", $i;
for (my $j = 0; for (
$j < $vals_per_line && $i <= $seg->{max_idx}; $j++) my $j = 0;
$j < $vals_per_line && $i <= $seg->{max_idx};
$j++)
{ {
# missing values represent zero. # missing values represent zero.
my $val = $seg->{values}->{$i} || 0; my $val = $seg->{values}->{$i} || 0;

View File

@ -1470,8 +1470,8 @@ check_GUC_init(struct config_generic *gconf)
/* Flag combinations */ /* Flag combinations */
/* /*
* GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of
* of SHOW ALL should not be hidden in postgresql.conf.sample. * SHOW ALL should not be hidden in postgresql.conf.sample.
*/ */
if ((gconf->flags & GUC_NO_SHOW_ALL) && if ((gconf->flags & GUC_NO_SHOW_ALL) &&
!(gconf->flags & GUC_NOT_IN_SAMPLE)) !(gconf->flags & GUC_NOT_IN_SAMPLE))

View File

@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children,
* *
* We don't buffer the information about all memory contexts in a * We don't buffer the information about all memory contexts in a
* backend into StringInfo and log it as one message. That would * backend into StringInfo and log it as one message. That would
* require the buffer to be enlarged, risking an OOM as there could * require the buffer to be enlarged, risking an OOM as there could be
* be a large number of memory contexts in a backend. Instead, we * a large number of memory contexts in a backend. Instead, we log
* log one message per memory context. * one message per memory context.
*/ */
ereport(LOG_SERVER_ONLY, ereport(LOG_SERVER_ONLY,
(errhidestmt(true), (errhidestmt(true),

View File

@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state)
/* /*
* We were able to accumulate all the tuples required for output * We were able to accumulate all the tuples required for output
* in memory, using a heap to eliminate excess tuples. Now we * in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array. * have to transform the heap to a properly-sorted array. Note
* Note that sort_bounded_heap sets the correct state->status. * that sort_bounded_heap sets the correct state->status.
*/ */
sort_bounded_heap(state); sort_bounded_heap(state);
state->current = 0; state->current = 0;

View File

@ -1565,8 +1565,8 @@ static void
setup_auth(FILE *cmdfd) setup_auth(FILE *cmdfd)
{ {
/* /*
* The authid table shouldn't be readable except through views, to * The authid table shouldn't be readable except through views, to ensure
* ensure passwords are not publicly visible. * passwords are not publicly visible.
*/ */
PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n"); PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n");
@ -1957,9 +1957,9 @@ make_template0(FILE *cmdfd)
" STRATEGY = file_copy;\n\n"); " STRATEGY = file_copy;\n\n");
/* /*
* template0 shouldn't have any collation-dependent objects, so unset * template0 shouldn't have any collation-dependent objects, so unset the
* the collation version. This disables collation version checks when * collation version. This disables collation version checks when making
* making a new database from it. * a new database from it.
*/ */
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n"); PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n");
@ -1969,9 +1969,8 @@ make_template0(FILE *cmdfd)
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n"); PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n");
/* /*
* Explicitly revoke public create-schema and create-temp-table * Explicitly revoke public create-schema and create-temp-table privileges
* privileges in template1 and template0; else the latter would be on * in template1 and template0; else the latter would be on by default
* by default
*/ */
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n"); PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n");
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n"); PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n");
@ -2264,8 +2263,8 @@ icu_language_tag(const char *loc_str)
return pstrdup("en-US-u-va-posix"); return pstrdup("en-US-u-va-posix");
/* /*
* A BCP47 language tag doesn't have a clearly-defined upper limit * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
* (cf. RFC5646 section 4.4). Additionally, in older ICU versions, * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the * uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop. * first call, necessitating a loop.
*/ */

View File

@ -132,8 +132,8 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like( command_fails_like(
[ [
'initdb', '--no-sync', 'initdb', '--no-sync',
'--locale-provider=icu', '--locale-provider=icu', '--icu-locale=nonsense-nowhere',
'--icu-locale=nonsense-nowhere', "$tempdir/dataX" "$tempdir/dataX"
], ],
qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/, qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/,
'fails for nonsense language'); 'fails for nonsense language');
@ -141,8 +141,8 @@ if ($ENV{with_icu} eq 'yes')
command_fails_like( command_fails_like(
[ [
'initdb', '--no-sync', 'initdb', '--no-sync',
'--locale-provider=icu', '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
'--icu-locale=@colNumeric=lower', "$tempdir/dataX" "$tempdir/dataX"
], ],
qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/, qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/,
'fails for invalid collation argument'); 'fails for invalid collation argument');

View File

@ -369,8 +369,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
$node->clean_node; $node->clean_node;
plan skip_all => plan skip_all =>
sprintf( sprintf(
"Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", $tupidx, "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b); $tupidx, 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
exit; exit;
} }

View File

@ -57,8 +57,10 @@ command_fails_like(
{ {
# like command_like but checking stderr # like command_like but checking stderr
my $stderr; my $stderr;
my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir, my $result =
$walfiles[2] ], '2>', \$stderr; IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
$walfiles[2] ],
'2>', \$stderr;
ok($result, "pg_archivecleanup dry run: exit code 0"); ok($result, "pg_archivecleanup dry run: exit code 0");
like( like(
$stderr, $stderr,

View File

@ -341,18 +341,18 @@ tablespace_list_append(const char *arg)
/* /*
* All tablespaces are created with absolute directories, so specifying a * All tablespaces are created with absolute directories, so specifying a
* non-absolute path here would just never match, possibly confusing users. * non-absolute path here would just never match, possibly confusing
* Since we don't know whether the remote side is Windows or not, and it * users. Since we don't know whether the remote side is Windows or not,
* might be different than the local side, permit any path that could be * and it might be different than the local side, permit any path that
* absolute under either set of rules. * could be absolute under either set of rules.
* *
* (There is little practical risk of confusion here, because someone * (There is little practical risk of confusion here, because someone
* running entirely on Linux isn't likely to have a relative path that * running entirely on Linux isn't likely to have a relative path that
* begins with a backslash or something that looks like a drive * begins with a backslash or something that looks like a drive
* specification. If they do, and they also incorrectly believe that * specification. If they do, and they also incorrectly believe that a
* a relative path is acceptable here, we'll silently fail to warn them * relative path is acceptable here, we'll silently fail to warn them of
* of their mistake, and the -T option will just not get applied, same * their mistake, and the -T option will just not get applied, same as if
* as if they'd specified -T for a nonexistent tablespace.) * they'd specified -T for a nonexistent tablespace.)
*/ */
if (!is_nonwindows_absolute_path(cell->old_dir) && if (!is_nonwindows_absolute_path(cell->old_dir) &&
!is_windows_absolute_path(cell->old_dir)) !is_windows_absolute_path(cell->old_dir))

View File

@ -144,8 +144,7 @@ SKIP:
'gzip:long', 'gzip:long',
'invalid compression specification: compression algorithm "gzip" does not support long-distance mode', 'invalid compression specification: compression algorithm "gzip" does not support long-distance mode',
'failure on long mode for gzip' 'failure on long mode for gzip'
], ],);
);
for my $cft (@compression_failure_tests) for my $cft (@compression_failure_tests)
{ {
@ -923,7 +922,8 @@ $sigchld_bb->finish();
# Test that we can back up an in-place tablespace # Test that we can back up an in-place tablespace
$node->safe_psql('postgres', $node->safe_psql('postgres',
"SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"); "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"
);
$node->safe_psql('postgres', $node->safe_psql('postgres',
"CREATE TABLE test2 (a int) TABLESPACE tblspc2;" "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
. "INSERT INTO test2 VALUES (1234);"); . "INSERT INTO test2 VALUES (1234);");

View File

@ -19,11 +19,12 @@ typedef struct
WalWriteMethod *wwmethod; WalWriteMethod *wwmethod;
off_t currpos; off_t currpos;
char *pathname; char *pathname;
/* /*
* MORE DATA FOLLOWS AT END OF STRUCT * MORE DATA FOLLOWS AT END OF STRUCT
* *
* Each WalWriteMethod is expected to embed this as the first member of * Each WalWriteMethod is expected to embed this as the first member of a
* a larger struct with method-specific fields following. * larger struct with method-specific fields following.
*/ */
} Walfile; } Walfile;
@ -107,11 +108,12 @@ struct WalWriteMethod
bool sync; bool sync;
const char *lasterrstring; /* if set, takes precedence over lasterrno */ const char *lasterrstring; /* if set, takes precedence over lasterrno */
int lasterrno; int lasterrno;
/* /*
* MORE DATA FOLLOWS AT END OF STRUCT * MORE DATA FOLLOWS AT END OF STRUCT
* *
* Each WalWriteMethod is expected to embed this as the first member of * Each WalWriteMethod is expected to embed this as the first member of a
* a larger struct with method-specific fields following. * larger struct with method-specific fields following.
*/ */
}; };

View File

@ -651,8 +651,8 @@ LZ4Stream_gets(char *ptr, int size, CompressFileHandle *CFH)
return NULL; return NULL;
/* /*
* Our caller expects the return string to be NULL terminated * Our caller expects the return string to be NULL terminated and we know
* and we know that ret is greater than zero. * that ret is greater than zero.
*/ */
ptr[ret - 1] = '\0'; ptr[ret - 1] = '\0';

View File

@ -387,6 +387,7 @@ RestoreArchive(Archive *AHX)
if (te->hadDumper && (te->reqs & REQ_DATA) != 0) if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
{ {
char *errmsg = supports_compression(AH->compression_spec); char *errmsg = supports_compression(AH->compression_spec);
if (errmsg) if (errmsg)
pg_fatal("cannot restore from compressed archive (%s)", pg_fatal("cannot restore from compressed archive (%s)",
errmsg); errmsg);
@ -2985,11 +2986,11 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
if (!te->hadDumper) if (!te->hadDumper)
{ {
/* /*
* Special Case: If 'SEQUENCE SET' or anything to do with LOs, then * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
* it is considered a data entry. We don't need to check for the * is considered a data entry. We don't need to check for the BLOBS
* BLOBS entry or old-style BLOB COMMENTS, because they will have * entry or old-style BLOB COMMENTS, because they will have hadDumper
* hadDumper = true ... but we do need to check new-style BLOB ACLs, * = true ... but we do need to check new-style BLOB ACLs, comments,
* comments, etc. * etc.
*/ */
if (strcmp(te->desc, "SEQUENCE SET") == 0 || if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
strcmp(te->desc, "BLOB") == 0 || strcmp(te->desc, "BLOB") == 0 ||
@ -3480,6 +3481,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te)
{ {
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag); appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
} }
/* /*
* These object types require additional decoration. Fortunately, the * These object types require additional decoration. Fortunately, the
* information needed is exactly what's in the DROP command. * information needed is exactly what's in the DROP command.
@ -3639,6 +3641,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
initPQExpBuffer(&temp); initPQExpBuffer(&temp);
_getObjectDescription(&temp, te); _getObjectDescription(&temp, te);
/* /*
* If _getObjectDescription() didn't fill the buffer, then there is no * If _getObjectDescription() didn't fill the buffer, then there is no
* owner. * owner.

View File

@ -684,10 +684,10 @@ _LoadLOs(ArchiveHandle *AH)
tarClose(AH, th); tarClose(AH, th);
/* /*
* Once we have found the first LO, stop at the first non-LO * Once we have found the first LO, stop at the first non-LO entry
* entry (which will be 'blobs.toc'). This coding would eat all * (which will be 'blobs.toc'). This coding would eat all the
* the rest of the archive if there are no LOs ... but this * rest of the archive if there are no LOs ... but this function
* function shouldn't be called at all in that case. * shouldn't be called at all in that case.
*/ */
if (foundLO) if (foundLO)
break; break;

View File

@ -756,9 +756,9 @@ main(int argc, char **argv)
pg_fatal("%s", error_detail); pg_fatal("%s", error_detail);
/* /*
* Disable support for zstd workers for now - these are based on threading, * Disable support for zstd workers for now - these are based on
* and it's unclear how it interacts with parallel dumps on platforms where * threading, and it's unclear how it interacts with parallel dumps on
* that relies on threads too (e.g. Windows). * platforms where that relies on threads too (e.g. Windows).
*/ */
if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS) if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
pg_log_warning("compression option \"%s\" is not currently supported by pg_dump", pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
@ -879,8 +879,8 @@ main(int argc, char **argv)
/* /*
* Dumping LOs is the default for dumps where an inclusion switch is not * Dumping LOs is the default for dumps where an inclusion switch is not
* used (an "include everything" dump). -B can be used to exclude LOs * used (an "include everything" dump). -B can be used to exclude LOs
* from those dumps. -b can be used to include LOs even when an * from those dumps. -b can be used to include LOs even when an inclusion
* inclusion switch is used. * switch is used.
* *
* -s means "schema only" and LOs are data, not schema, so we never * -s means "schema only" and LOs are data, not schema, so we never
* include LOs when -s is used. * include LOs when -s is used.
@ -915,8 +915,8 @@ main(int argc, char **argv)
* data or the associated metadata that resides in the pg_largeobject and * data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively. * pg_largeobject_metadata tables, respectively.
* *
* However, we do need to collect LO information as there may be * However, we do need to collect LO information as there may be comments
* comments or other information on LOs that we do need to dump out. * or other information on LOs that we do need to dump out.
*/ */
if (dopt.outputLOs || dopt.binary_upgrade) if (dopt.outputLOs || dopt.binary_upgrade)
getLOs(fout); getLOs(fout);
@ -3590,8 +3590,8 @@ getLOs(Archive *fout)
loinfo[i].dobj.components |= DUMP_COMPONENT_ACL; loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/* /*
* In binary-upgrade mode for LOs, we do *not* dump out the LO * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
* data, as it will be copied by pg_upgrade, which simply copies the * as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the * pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not * data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored. * pg_largeobject_metadata, after the dump is restored.
@ -14828,7 +14828,10 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
if (dopt->no_security_labels) if (dopt->no_security_labels)
return; return;
/* Security labels are schema not data ... except large object labels are data */ /*
* Security labels are schema not data ... except large object labels are
* data
*/
if (strcmp(type, "LARGE OBJECT") != 0) if (strcmp(type, "LARGE OBJECT") != 0)
{ {
if (dopt->dataOnly) if (dopt->dataOnly)
@ -16632,10 +16635,12 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
{ {
appendPQExpBufferStr(q, appendPQExpBufferStr(q,
coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE"); coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
/* /*
* PRIMARY KEY constraints should not be using NULLS NOT DISTINCT * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
* indexes. Being able to create this was fixed, but we need to * indexes. Being able to create this was fixed, but we need to
* make the index distinct in order to be able to restore the dump. * make the index distinct in order to be able to restore the
* dump.
*/ */
if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p') if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
appendPQExpBufferStr(q, " NULLS NOT DISTINCT"); appendPQExpBufferStr(q, " NULLS NOT DISTINCT");

View File

@ -996,8 +996,8 @@ dumpRoleMembership(PGconn *conn)
/* /*
* We can't dump these GRANT commands in arbitrary order, because a role * We can't dump these GRANT commands in arbitrary order, because a role
* that is named as a grantor must already have ADMIN OPTION on the * that is named as a grantor must already have ADMIN OPTION on the role
* role for which it is granting permissions, except for the bootstrap * for which it is granting permissions, except for the bootstrap
* superuser, who can always be named as the grantor. * superuser, who can always be named as the grantor.
* *
* We handle this by considering these grants role by role. For each role, * We handle this by considering these grants role by role. For each role,
@ -1005,8 +1005,8 @@ dumpRoleMembership(PGconn *conn)
* superuser. Every time we grant ADMIN OPTION on the role to some user, * superuser. Every time we grant ADMIN OPTION on the role to some user,
* that user also becomes an allowable grantor. We make repeated passes * that user also becomes an allowable grantor. We make repeated passes
* over the grants for the role, each time dumping those whose grantors * over the grants for the role, each time dumping those whose grantors
* are allowable and which we haven't done yet. Eventually this should * are allowable and which we haven't done yet. Eventually this should let
* let us dump all the grants. * us dump all the grants.
*/ */
total = PQntuples(res); total = PQntuples(res);
while (start < total) while (start < total)

View File

@ -156,10 +156,8 @@ my %pgdump_runs = (
"$tempdir/compression_lz4_custom.dump", "$tempdir/compression_lz4_custom.dump",
], ],
command_like => { command_like => {
command => [ command =>
'pg_restore', [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
'-l', "$tempdir/compression_lz4_custom.dump",
],
expected => qr/Compression: lz4/, expected => qr/Compression: lz4/,
name => 'data content is lz4 compressed' name => 'data content is lz4 compressed'
}, },
@ -229,8 +227,7 @@ my %pgdump_runs = (
], ],
command_like => { command_like => {
command => [ command => [
'pg_restore', 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
'-l', "$tempdir/compression_zstd_custom.dump",
], ],
expected => qr/Compression: zstd/, expected => qr/Compression: zstd/,
name => 'data content is zstd compressed' name => 'data content is zstd compressed'
@ -250,8 +247,8 @@ my %pgdump_runs = (
compress_cmd => { compress_cmd => {
program => $ENV{'ZSTD'}, program => $ENV{'ZSTD'},
args => [ args => [
'-z', '-f', '--rm', '-z', '-f',
"$tempdir/compression_zstd_dir/blobs.toc", '--rm', "$tempdir/compression_zstd_dir/blobs.toc",
"-o", "$tempdir/compression_zstd_dir/blobs.toc.zst", "-o", "$tempdir/compression_zstd_dir/blobs.toc.zst",
], ],
}, },
@ -280,8 +277,8 @@ my %pgdump_runs = (
program => $ENV{'ZSTD'}, program => $ENV{'ZSTD'},
args => [ args => [
'-d', '-f', '-d', '-f',
"$tempdir/compression_zstd_plain.sql.zst", "$tempdir/compression_zstd_plain.sql.zst", "-o",
"-o", "$tempdir/compression_zstd_plain.sql", "$tempdir/compression_zstd_plain.sql",
], ],
}, },
}, },
@ -385,9 +382,9 @@ my %pgdump_runs = (
command_like => { command_like => {
command => command =>
[ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ], [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
expected => $supports_gzip ? expected => $supports_gzip
qr/Compression: gzip/ : ? qr/Compression: gzip/
qr/Compression: none/, : qr/Compression: none/,
name => 'data content is gzip-compressed by default if available', name => 'data content is gzip-compressed by default if available',
}, },
}, },
@ -410,17 +407,15 @@ my %pgdump_runs = (
command_like => { command_like => {
command => command =>
[ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ], [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
expected => $supports_gzip ? expected => $supports_gzip ? qr/Compression: gzip/
qr/Compression: gzip/ : : qr/Compression: none/,
qr/Compression: none/,
name => 'data content is gzip-compressed by default', name => 'data content is gzip-compressed by default',
}, },
glob_patterns => [ glob_patterns => [
"$tempdir/defaults_dir_format/toc.dat", "$tempdir/defaults_dir_format/toc.dat",
"$tempdir/defaults_dir_format/blobs.toc", "$tempdir/defaults_dir_format/blobs.toc",
$supports_gzip ? $supports_gzip ? "$tempdir/defaults_dir_format/*.dat.gz"
"$tempdir/defaults_dir_format/*.dat.gz" : : "$tempdir/defaults_dir_format/*.dat",
"$tempdir/defaults_dir_format/*.dat",
], ],
}, },
@ -468,7 +463,8 @@ my %pgdump_runs = (
}, },
exclude_measurement => { exclude_measurement => {
dump_cmd => [ dump_cmd => [
'pg_dump', '--no-sync', 'pg_dump',
'--no-sync',
"--file=$tempdir/exclude_measurement.sql", "--file=$tempdir/exclude_measurement.sql",
'--exclude-table-and-children=dump_test.measurement', '--exclude-table-and-children=dump_test.measurement',
'postgres', 'postgres',
@ -534,9 +530,8 @@ my %pgdump_runs = (
}, },
no_large_objects => { no_large_objects => {
dump_cmd => [ dump_cmd => [
'pg_dump', '--no-sync', 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
"--file=$tempdir/no_large_objects.sql", '-B', '-B', 'postgres',
'postgres',
], ],
}, },
no_privs => { no_privs => {
@ -1339,8 +1334,7 @@ my %tests = (
}, },
'LO create (with no data)' => { 'LO create (with no data)' => {
create_sql => create_sql => 'SELECT pg_catalog.lo_create(0);',
'SELECT pg_catalog.lo_create(0);',
regexp => qr/^ regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n \QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lo_close(0);\E \QSELECT pg_catalog.lo_close(0);\E
@ -1933,7 +1927,8 @@ my %tests = (
'CREATE COLLATION icu_collation' => { 'CREATE COLLATION icu_collation' => {
create_order => 76, create_order => 76,
create_sql => "CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');", create_sql =>
"CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');",
regexp => regexp =>
qr/CREATE COLLATION public.icu_collation \(provider = icu, locale = 'en-US-u-va-posix'(, version = '[^']*')?\);/m, qr/CREATE COLLATION public.icu_collation \(provider = icu, locale = 'en-US-u-va-posix'(, version = '[^']*')?\);/m,
icu => 1, icu => 1,
@ -3119,9 +3114,7 @@ my %tests = (
\Q);\E \Q);\E
/xm, /xm,
like => { like => {
%full_runs, %full_runs, %dump_test_schema_runs, section_pre_data => 1,
%dump_test_schema_runs,
section_pre_data => 1,
}, },
unlike => { unlike => {
exclude_dump_test_schema => 1, exclude_dump_test_schema => 1,
@ -3290,7 +3283,8 @@ my %tests = (
\QEXECUTE FUNCTION dump_test.trigger_func();\E \QEXECUTE FUNCTION dump_test.trigger_func();\E
/xm, /xm,
like => { like => {
%full_runs, %dump_test_schema_runs, section_post_data => 1, %full_runs, %dump_test_schema_runs,
section_post_data => 1,
only_dump_measurement => 1, only_dump_measurement => 1,
}, },
unlike => { unlike => {
@ -3301,7 +3295,8 @@ my %tests = (
'COPY measurement' => { 'COPY measurement' => {
create_order => 93, create_order => 93,
create_sql => 'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) ' create_sql =>
'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) '
. "VALUES (1, '2006-02-12', 35, 1);", . "VALUES (1, '2006-02-12', 35, 1);",
regexp => qr/^ regexp => qr/^
\QCOPY dump_test_second_schema.measurement_y2006m2 (city_id, logdate, peaktemp, unitsales) FROM stdin;\E \QCOPY dump_test_second_schema.measurement_y2006m2 (city_id, logdate, peaktemp, unitsales) FROM stdin;\E
@ -4768,12 +4763,16 @@ foreach my $run (sort keys %pgdump_runs)
my $run_db = 'postgres'; my $run_db = 'postgres';
# Skip command-level tests for gzip/lz4/zstd if the tool is not supported # Skip command-level tests for gzip/lz4/zstd if the tool is not supported
if ($pgdump_runs{$run}->{compile_option} && if ($pgdump_runs{$run}->{compile_option}
(($pgdump_runs{$run}->{compile_option} eq 'gzip' && !$supports_gzip) || && (($pgdump_runs{$run}->{compile_option} eq 'gzip'
($pgdump_runs{$run}->{compile_option} eq 'lz4' && !$supports_lz4) || && !$supports_gzip)
($pgdump_runs{$run}->{compile_option} eq 'zstd' && !$supports_zstd))) || ($pgdump_runs{$run}->{compile_option} eq 'lz4'
&& !$supports_lz4)
|| ($pgdump_runs{$run}->{compile_option} eq 'zstd'
&& !$supports_zstd)))
{ {
note "$run: skipped due to no $pgdump_runs{$run}->{compile_option} support"; note
"$run: skipped due to no $pgdump_runs{$run}->{compile_option} support";
next; next;
} }
@ -4800,16 +4799,18 @@ foreach my $run (sort keys %pgdump_runs)
foreach my $glob_pattern (@{$glob_patterns}) foreach my $glob_pattern (@{$glob_patterns})
{ {
my @glob_output = glob($glob_pattern); my @glob_output = glob($glob_pattern);
is(scalar(@glob_output) > 0, 1, "$run: glob check for $glob_pattern"); is(scalar(@glob_output) > 0,
1, "$run: glob check for $glob_pattern");
} }
} }
if ($pgdump_runs{$run}->{command_like}) if ($pgdump_runs{$run}->{command_like})
{ {
my $cmd_like = $pgdump_runs{$run}->{command_like}; my $cmd_like = $pgdump_runs{$run}->{command_like};
$node->command_like(\@{ $cmd_like->{command} }, $node->command_like(
\@{ $cmd_like->{command} },
$cmd_like->{expected}, $cmd_like->{expected},
"$run: " . $cmd_like->{name}) "$run: " . $cmd_like->{name});
} }
if ($pgdump_runs{$run}->{restore_cmd}) if ($pgdump_runs{$run}->{restore_cmd})

View File

@ -105,8 +105,8 @@ check_and_dump_old_cluster(bool live_check)
check_for_isn_and_int8_passing_mismatch(&old_cluster); check_for_isn_and_int8_passing_mismatch(&old_cluster);
/* /*
* PG 16 increased the size of the 'aclitem' type, which breaks the on-disk * PG 16 increased the size of the 'aclitem' type, which breaks the
* format for existing data. * on-disk format for existing data.
*/ */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500) if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500)
check_for_aclitem_data_type_usage(&old_cluster); check_for_aclitem_data_type_usage(&old_cluster);

View File

@ -138,11 +138,12 @@ $oldnode->start;
my $result; my $result;
$result = $oldnode->safe_psql( $result = $oldnode->safe_psql(
'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field 'postgres',
"SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
FROM pg_database WHERE datname='template0'"); FROM pg_database WHERE datname='template0'");
is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale", is( $result,
"check locales in original cluster" "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
); "check locales in original cluster");
# The default location of the source code is the root of this directory. # The default location of the source code is the root of this directory.
my $srcdir = abs_path("../../.."); my $srcdir = abs_path("../../..");
@ -337,8 +338,7 @@ command_fails(
'-s', $newnode->host, '-s', $newnode->host,
'-p', $oldnode->port, '-p', $oldnode->port,
'-P', $newnode->port, '-P', $newnode->port,
$mode, $mode, '--check',
'--check',
], ],
'run of pg_upgrade --check for new instance with incorrect binary path'); 'run of pg_upgrade --check for new instance with incorrect binary path');
ok(-d $newnode->data_dir . "/pg_upgrade_output.d", ok(-d $newnode->data_dir . "/pg_upgrade_output.d",
@ -352,8 +352,7 @@ command_ok(
'-D', $newnode->data_dir, '-b', $oldbindir, '-D', $newnode->data_dir, '-b', $oldbindir,
'-B', $newbindir, '-s', $newnode->host, '-B', $newbindir, '-s', $newnode->host,
'-p', $oldnode->port, '-P', $newnode->port, '-p', $oldnode->port, '-P', $newnode->port,
$mode, $mode, '--check',
'--check',
], ],
'run of pg_upgrade --check for new instance'); 'run of pg_upgrade --check for new instance');
ok(!-d $newnode->data_dir . "/pg_upgrade_output.d", ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
@ -396,11 +395,12 @@ if (-d $log_path)
# Test that upgraded cluster has original locale settings. # Test that upgraded cluster has original locale settings.
$result = $newnode->safe_psql( $result = $newnode->safe_psql(
'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field 'postgres',
"SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
FROM pg_database WHERE datname='template0'"); FROM pg_database WHERE datname='template0'");
is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale", is( $result,
"check that locales in new cluster match original cluster" "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
); "check that locales in new cluster match original cluster");
# Second dump from the upgraded instance. # Second dump from the upgraded instance.
@dump_command = ( @dump_command = (

View File

@ -5063,15 +5063,16 @@ pset_value_string(const char *param, printQueryOpt *popt)
else if (strcmp(param, "xheader_width") == 0) else if (strcmp(param, "xheader_width") == 0)
{ {
if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL) if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL)
return(pstrdup("full")); return pstrdup("full");
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN) else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN)
return(pstrdup("column")); return pstrdup("column");
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE) else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE)
return(pstrdup("page")); return pstrdup("page");
else else
{ {
/* must be PRINT_XHEADER_EXACT_WIDTH */ /* must be PRINT_XHEADER_EXACT_WIDTH */
char wbuff[32]; char wbuff[32];
snprintf(wbuff, sizeof(wbuff), "%d", snprintf(wbuff, sizeof(wbuff), "%d",
popt->topt.expanded_header_exact_width); popt->topt.expanded_header_exact_width);
return pstrdup(wbuff); return pstrdup(wbuff);

View File

@ -96,7 +96,8 @@ typedef struct _psqlSettings
char *gset_prefix; /* one-shot prefix argument for \gset */ char *gset_prefix; /* one-shot prefix argument for \gset */
bool gdesc_flag; /* one-shot request to describe query result */ bool gdesc_flag; /* one-shot request to describe query result */
bool gexec_flag; /* one-shot request to execute query result */ bool gexec_flag; /* one-shot request to execute query result */
bool bind_flag; /* one-shot request to use extended query protocol */ bool bind_flag; /* one-shot request to use extended query
* protocol */
int bind_nparams; /* number of parameters */ int bind_nparams; /* number of parameters */
char **bind_params; /* parameters for extended query protocol call */ char **bind_params; /* parameters for extended query protocol call */
bool crosstab_flag; /* one-shot request to crosstab result */ bool crosstab_flag; /* one-shot request to crosstab result */

View File

@ -348,16 +348,12 @@ psql_like(
qr/1\|value\|2022-07-04 00:00:00 qr/1\|value\|2022-07-04 00:00:00
2|test|2022-07-03 00:00:00 2|test|2022-07-03 00:00:00
3|test|2022-07-05 00:00:00/, 3|test|2022-07-05 00:00:00/,
'\copy from with DEFAULT' '\copy from with DEFAULT');
);
# Check \watch # Check \watch
# Note: the interval value is parsed with locale-aware strtod() # Note: the interval value is parsed with locale-aware strtod()
psql_like( psql_like($node, sprintf('SELECT 1 \watch c=3 i=%g', 0.01),
$node, qr/1\n1\n1/, '\watch with 3 iterations');
sprintf('SELECT 1 \watch c=3 i=%g', 0.01),
qr/1\n1\n1/,
'\watch with 3 iterations');
# Check \watch errors # Check \watch errors
psql_fails_like( psql_fails_like(

View File

@ -71,7 +71,8 @@ delete $ENV{LS_COLORS};
# completion tests is too variable. # completion tests is too variable.
if ($ENV{TESTDATADIR}) if ($ENV{TESTDATADIR})
{ {
chdir $ENV{TESTDATADIR} or die "could not chdir to \"$ENV{TESTDATADIR}\": $!"; chdir $ENV{TESTDATADIR}
or die "could not chdir to \"$ENV{TESTDATADIR}\": $!";
} }
# Create some junk files for filename completion testing. # Create some junk files for filename completion testing.

View File

@ -40,8 +40,10 @@ if ($ENV{with_icu} eq 'yes')
$node->issues_sql_like( $node->issues_sql_like(
[ [
'createdb', '-T', 'createdb', '-T',
'template0', '-E', 'UTF8', '--locale-provider=icu', 'template0', '-E',
'--locale=C', '--icu-locale=en', 'foobar5' 'UTF8', '--locale-provider=icu',
'--locale=C', '--icu-locale=en',
'foobar5'
], ],
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/, qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
'create database with ICU locale specified'); 'create database with ICU locale specified');
@ -69,12 +71,21 @@ if ($ENV{with_icu} eq 'yes')
$node2->start; $node2->start;
$node2->command_ok( $node2->command_ok(
[ 'createdb', '-T', 'template0', '--locale-provider=libc', 'foobar55' ], [
'create database with libc provider from template database with icu provider'); 'createdb', '-T',
'template0', '--locale-provider=libc',
'foobar55'
],
'create database with libc provider from template database with icu provider'
);
$node2->command_ok( $node2->command_ok(
[ 'createdb', '-T', 'template0', '--icu-locale', 'en-US', 'foobar56' ], [
'create database with icu locale from template database with icu provider'); 'createdb', '-T', 'template0', '--icu-locale', 'en-US',
'foobar56'
],
'create database with icu locale from template database with icu provider'
);
} }
else else
{ {
@ -163,17 +174,11 @@ $node->issues_sql_like(
[ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ], [ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ],
qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/, qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/,
'create database with owner role_foobar'); 'create database with owner role_foobar');
($ret, $stdout, $stderr) = $node->psql( ($ret, $stdout, $stderr) =
'foobar2', $node->psql('foobar2', 'DROP OWNED BY role_foobar;', on_error_die => 1,);
'DROP OWNED BY role_foobar;',
on_error_die => 1,
);
ok($ret == 0, "DROP OWNED BY role_foobar"); ok($ret == 0, "DROP OWNED BY role_foobar");
($ret, $stdout, $stderr) = $node->psql( ($ret, $stdout, $stderr) =
'foobar2', $node->psql('foobar2', 'DROP DATABASE foobar8;', on_error_die => 1,);
'DROP DATABASE foobar8;',
on_error_die => 1,
);
ok($ret == 0, "DROP DATABASE foobar8"); ok($ret == 0, "DROP DATABASE foobar8");
done_testing(); done_testing();

View File

@ -53,7 +53,8 @@ my $fetch_toast_relfilenodes =
WHERE b.oid IN ('pg_constraint'::regclass, 'test1'::regclass)}; WHERE b.oid IN ('pg_constraint'::regclass, 'test1'::regclass)};
# Same for relfilenodes of normal indexes. This saves the relfilenode # Same for relfilenodes of normal indexes. This saves the relfilenode
# from an index of pg_constraint, and from the index of the test table. # from an index of pg_constraint, and from the index of the test table.
my $fetch_index_relfilenodes = qq{SELECT i.indrelid, a.oid::regclass::text, a.oid, a.relfilenode my $fetch_index_relfilenodes =
qq{SELECT i.indrelid, a.oid::regclass::text, a.oid, a.relfilenode
FROM pg_class a FROM pg_class a
JOIN pg_index i ON (i.indexrelid = a.oid) JOIN pg_index i ON (i.indexrelid = a.oid)
WHERE a.relname IN ('pg_constraint_oid_index', 'test1x')}; WHERE a.relname IN ('pg_constraint_oid_index', 'test1x')};

View File

@ -18,8 +18,7 @@ use PerfectHash;
my $output_path = '.'; my $output_path = '.';
GetOptions( GetOptions('outdir:s' => \$output_path);
'outdir:s' => \$output_path);
my $output_table_file = "$output_path/unicode_norm_table.h"; my $output_table_file = "$output_path/unicode_norm_table.h";
my $output_func_file = "$output_path/unicode_norm_hashfunc.h"; my $output_func_file = "$output_path/unicode_norm_hashfunc.h";

View File

@ -1295,10 +1295,11 @@ print_aligned_vertical_line(const printTableOpt *topt,
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth))); dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth)));
if (opt_border == 1) if (opt_border == 1)
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 3))); dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 3)));
/* /*
* Handling the xheader width for border=2 doesn't make * Handling the xheader width for border=2 doesn't make much
* much sense because this format has an additional * sense because this format has an additional right border,
* right border, but keep this for consistency. * but keep this for consistency.
*/ */
if (opt_border == 2) if (opt_border == 2)
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 7))); dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 7)));

View File

@ -550,6 +550,7 @@ extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup,
/* gistbuild.c */ /* gistbuild.c */
extern IndexBuildResult *gistbuild(Relation heap, Relation index, extern IndexBuildResult *gistbuild(Relation heap, Relation index,
struct IndexInfo *indexInfo); struct IndexInfo *indexInfo);
/* gistbuildbuffers.c */ /* gistbuildbuffers.c */
extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep, extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep,
int maxLevel); int maxLevel);

View File

@ -332,6 +332,7 @@ extern XLogReaderState *XLogReaderAllocate(int wal_segment_size,
const char *waldir, const char *waldir,
XLogReaderRoutine *routine, XLogReaderRoutine *routine,
void *private_data); void *private_data);
/* Free an XLogReader */ /* Free an XLogReader */
extern void XLogReaderFree(XLogReaderState *state); extern void XLogReaderFree(XLogReaderState *state);

View File

@ -537,29 +537,29 @@
# array # array
{ aggfnoid => 'array_agg(anynonarray)', aggtransfn => 'array_agg_transfn', { aggfnoid => 'array_agg(anynonarray)', aggtransfn => 'array_agg_transfn',
aggcombinefn => 'array_agg_combine', aggserialfn => 'array_agg_serialize', aggfinalfn => 'array_agg_finalfn', aggcombinefn => 'array_agg_combine',
aggdeserialfn => 'array_agg_deserialize', aggfinalfn => 'array_agg_finalfn', aggserialfn => 'array_agg_serialize',
aggfinalextra => 't', aggtranstype => 'internal' }, aggdeserialfn => 'array_agg_deserialize', aggfinalextra => 't',
aggtranstype => 'internal' },
{ aggfnoid => 'array_agg(anyarray)', aggtransfn => 'array_agg_array_transfn', { aggfnoid => 'array_agg(anyarray)', aggtransfn => 'array_agg_array_transfn',
aggfinalfn => 'array_agg_array_finalfn',
aggcombinefn => 'array_agg_array_combine', aggcombinefn => 'array_agg_array_combine',
aggserialfn => 'array_agg_array_serialize', aggserialfn => 'array_agg_array_serialize',
aggdeserialfn => 'array_agg_array_deserialize', aggdeserialfn => 'array_agg_array_deserialize', aggfinalextra => 't',
aggfinalfn => 'array_agg_array_finalfn', aggfinalextra => 't',
aggtranstype => 'internal' }, aggtranstype => 'internal' },
# text # text
{ aggfnoid => 'string_agg(text,text)', aggtransfn => 'string_agg_transfn', { aggfnoid => 'string_agg(text,text)', aggtransfn => 'string_agg_transfn',
aggcombinefn => 'string_agg_combine', aggserialfn => 'string_agg_serialize', aggfinalfn => 'string_agg_finalfn', aggcombinefn => 'string_agg_combine',
aggdeserialfn => 'string_agg_deserialize', aggserialfn => 'string_agg_serialize',
aggfinalfn => 'string_agg_finalfn', aggtranstype => 'internal' }, aggdeserialfn => 'string_agg_deserialize', aggtranstype => 'internal' },
# bytea # bytea
{ aggfnoid => 'string_agg(bytea,bytea)', { aggfnoid => 'string_agg(bytea,bytea)',
aggtransfn => 'bytea_string_agg_transfn', aggtransfn => 'bytea_string_agg_transfn',
aggcombinefn => 'string_agg_combine', aggfinalfn => 'bytea_string_agg_finalfn',
aggserialfn => 'string_agg_serialize', aggcombinefn => 'string_agg_combine', aggserialfn => 'string_agg_serialize',
aggdeserialfn => 'string_agg_deserialize', aggdeserialfn => 'string_agg_deserialize', aggtranstype => 'internal' },
aggfinalfn => 'bytea_string_agg_finalfn', aggtranstype => 'internal' },
# range # range
{ aggfnoid => 'range_intersect_agg(anyrange)', { aggfnoid => 'range_intersect_agg(anyrange)',

View File

@ -18,6 +18,7 @@
datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't', datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
datallowconn => 't', datconnlimit => '-1', datfrozenxid => '0', datallowconn => 't', datconnlimit => '-1', datfrozenxid => '0',
datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE', datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE',
datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE', daticurules => 'ICU_RULES', datacl => '_null_' }, datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE',
daticurules => 'ICU_RULES', datacl => '_null_' },
] ]

View File

@ -1667,8 +1667,9 @@
prorettype => 'internal', proargtypes => 'internal anyarray', prorettype => 'internal', proargtypes => 'internal anyarray',
prosrc => 'array_agg_array_transfn' }, prosrc => 'array_agg_array_transfn' },
{ oid => '6296', descr => 'aggregate combine function', { oid => '6296', descr => 'aggregate combine function',
proname => 'array_agg_array_combine', proisstrict => 'f', prorettype => 'internal', proname => 'array_agg_array_combine', proisstrict => 'f',
proargtypes => 'internal internal', prosrc => 'array_agg_array_combine' }, prorettype => 'internal', proargtypes => 'internal internal',
prosrc => 'array_agg_array_combine' },
{ oid => '6297', descr => 'aggregate serial function', { oid => '6297', descr => 'aggregate serial function',
proname => 'array_agg_array_serialize', prorettype => 'bytea', proname => 'array_agg_array_serialize', prorettype => 'bytea',
proargtypes => 'internal', prosrc => 'array_agg_array_serialize' }, proargtypes => 'internal', prosrc => 'array_agg_array_serialize' },
@ -5481,10 +5482,9 @@
prorettype => 'oid', proargtypes => 'int4', prorettype => 'oid', proargtypes => 'int4',
prosrc => 'pg_stat_get_backend_dbid' }, prosrc => 'pg_stat_get_backend_dbid' },
{ oid => '6107', descr => 'statistics: get subtransaction status of backend', { oid => '6107', descr => 'statistics: get subtransaction status of backend',
proname => 'pg_stat_get_backend_subxact', provolatile => 's', proparallel => 'r', proname => 'pg_stat_get_backend_subxact', provolatile => 's',
prorettype => 'record', proargtypes => 'int4', proparallel => 'r', prorettype => 'record', proargtypes => 'int4',
proallargtypes => '{int4,int4,bool}', proallargtypes => '{int4,int4,bool}', proargmodes => '{i,o,o}',
proargmodes => '{i,o,o}',
proargnames => '{bid,subxact_count,subxact_overflowed}', proargnames => '{bid,subxact_count,subxact_overflowed}',
prosrc => 'pg_stat_get_backend_subxact' }, prosrc => 'pg_stat_get_backend_subxact' },
{ oid => '1939', descr => 'statistics: user ID of backend', { oid => '1939', descr => 'statistics: user ID of backend',
@ -5731,9 +5731,9 @@
prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_buf_alloc' }, prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_buf_alloc' },
{ oid => '6214', descr => 'statistics: per backend type IO statistics', { oid => '6214', descr => 'statistics: per backend type IO statistics',
proname => 'pg_stat_get_io', provolatile => 'v', proname => 'pg_stat_get_io', prorows => '30', proretset => 't',
prorows => '30', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record',
proparallel => 'r', prorettype => 'record', proargtypes => '', proargtypes => '',
proallargtypes => '{text,text,text,int8,float8,int8,float8,int8,float8,int8,float8,int8,int8,int8,int8,int8,float8,timestamptz}', proallargtypes => '{text,text,text,int8,float8,int8,float8,int8,float8,int8,float8,int8,int8,int8,int8,int8,float8,timestamptz}',
proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}',
proargnames => '{backend_type,object,context,reads,read_time,writes,write_time,writebacks,writeback_time,extends,extend_time,op_bytes,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}', proargnames => '{backend_type,object,context,reads,read_time,writes,write_time,writebacks,writeback_time,extends,extend_time,op_bytes,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}',
@ -6407,8 +6407,9 @@
proname => 'pg_switch_wal', provolatile => 'v', prorettype => 'pg_lsn', proname => 'pg_switch_wal', provolatile => 'v', prorettype => 'pg_lsn',
proargtypes => '', prosrc => 'pg_switch_wal' }, proargtypes => '', prosrc => 'pg_switch_wal' },
{ oid => '6305', descr => 'log details of the current snapshot to WAL', { oid => '6305', descr => 'log details of the current snapshot to WAL',
proname => 'pg_log_standby_snapshot', provolatile => 'v', prorettype => 'pg_lsn', proname => 'pg_log_standby_snapshot', provolatile => 'v',
proargtypes => '', prosrc => 'pg_log_standby_snapshot' }, prorettype => 'pg_lsn', proargtypes => '',
prosrc => 'pg_log_standby_snapshot' },
{ oid => '3098', descr => 'create a named restore point', { oid => '3098', descr => 'create a named restore point',
proname => 'pg_create_restore_point', provolatile => 'v', proname => 'pg_create_restore_point', provolatile => 'v',
prorettype => 'pg_lsn', proargtypes => 'text', prorettype => 'pg_lsn', proargtypes => 'text',
@ -10349,15 +10350,15 @@
proargtypes => 'internal', prosrc => 'window_dense_rank_support' }, proargtypes => 'internal', prosrc => 'window_dense_rank_support' },
{ oid => '3103', descr => 'fractional rank within partition', { oid => '3103', descr => 'fractional rank within partition',
proname => 'percent_rank', prosupport => 'window_percent_rank_support', proname => 'percent_rank', prosupport => 'window_percent_rank_support',
prokind => 'w', proisstrict => 'f', prorettype => 'float8', prokind => 'w', proisstrict => 'f', prorettype => 'float8', proargtypes => '',
proargtypes => '', prosrc => 'window_percent_rank' }, prosrc => 'window_percent_rank' },
{ oid => '6306', descr => 'planner support for percent_rank', { oid => '6306', descr => 'planner support for percent_rank',
proname => 'window_percent_rank_support', prorettype => 'internal', proname => 'window_percent_rank_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'window_percent_rank_support' }, proargtypes => 'internal', prosrc => 'window_percent_rank_support' },
{ oid => '3104', descr => 'fractional row number within partition', { oid => '3104', descr => 'fractional row number within partition',
proname => 'cume_dist', prosupport => 'window_cume_dist_support', proname => 'cume_dist', prosupport => 'window_cume_dist_support',
prokind => 'w', proisstrict => 'f', prorettype => 'float8', prokind => 'w', proisstrict => 'f', prorettype => 'float8', proargtypes => '',
proargtypes => '', prosrc => 'window_cume_dist' }, prosrc => 'window_cume_dist' },
{ oid => '6307', descr => 'planner support for cume_dist', { oid => '6307', descr => 'planner support for cume_dist',
proname => 'window_cume_dist_support', prorettype => 'internal', proname => 'window_cume_dist_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'window_cume_dist_support' }, proargtypes => 'internal', prosrc => 'window_cume_dist_support' },
@ -11824,7 +11825,8 @@
provariadic => 'text', proretset => 't', provolatile => 's', provariadic => 'text', proretset => 't', provolatile => 's',
prorettype => 'record', proargtypes => '_text', prorettype => 'record', proargtypes => '_text',
proallargtypes => '{_text,oid,oid,int2vector,pg_node_tree}', proallargtypes => '{_text,oid,oid,int2vector,pg_node_tree}',
proargmodes => '{v,o,o,o,o}', proargnames => '{pubname,pubid,relid,attrs,qual}', proargmodes => '{v,o,o,o,o}',
proargnames => '{pubname,pubid,relid,attrs,qual}',
prosrc => 'pg_get_publication_tables' }, prosrc => 'pg_get_publication_tables' },
{ oid => '6121', { oid => '6121',
descr => 'returns whether a relation can be part of a publication', descr => 'returns whether a relation can be part of a publication',

View File

@ -90,8 +90,8 @@ CATALOG(pg_subscription,6100,SubscriptionRelationId) BKI_SHARED_RELATION BKI_ROW
bool subpasswordrequired; /* Must connection use a password? */ bool subpasswordrequired; /* Must connection use a password? */
bool subrunasowner; /* True if replication should execute as bool subrunasowner; /* True if replication should execute as the
* the subscription owner */ * subscription owner */
#ifdef CATALOG_VARLEN /* variable-length fields start here */ #ifdef CATALOG_VARLEN /* variable-length fields start here */
/* Connection string to the publisher */ /* Connection string to the publisher */

View File

@ -69,9 +69,12 @@ typedef enum printTextLineWrap
typedef enum printXheaderWidthType typedef enum printXheaderWidthType
{ {
/* Expanded header line width variants */ /* Expanded header line width variants */
PRINT_XHEADER_FULL, /* do not truncate header line (this is the default) */ PRINT_XHEADER_FULL, /* do not truncate header line (this is the
PRINT_XHEADER_COLUMN, /* only print header line above the first column */ * default) */
PRINT_XHEADER_PAGE, /* header line must not be longer than terminal width */ PRINT_XHEADER_COLUMN, /* only print header line above the first
* column */
PRINT_XHEADER_PAGE, /* header line must not be longer than
* terminal width */
PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */ PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */
} printXheaderWidthType; } printXheaderWidthType;
@ -110,8 +113,10 @@ typedef struct printTableOpt
enum printFormat format; /* see enum above */ enum printFormat format; /* see enum above */
unsigned short int expanded; /* expanded/vertical output (if supported unsigned short int expanded; /* expanded/vertical output (if supported
* by output format); 0=no, 1=yes, 2=auto */ * by output format); 0=no, 1=yes, 2=auto */
printXheaderWidthType expanded_header_width_type; /* width type for header line in expanded mode */ printXheaderWidthType expanded_header_width_type; /* width type for header
int expanded_header_exact_width; /* explicit width for header line in expanded mode */ * line in expanded mode */
int expanded_header_exact_width; /* explicit width for header
* line in expanded mode */
unsigned short int border; /* Print a border around the table. 0=none, unsigned short int border; /* Print a border around the table. 0=none,
* 1=dividing lines, 2=full */ * 1=dividing lines, 2=full */
unsigned short int pager; /* use pager for output (if to stdout and unsigned short int pager; /* use pager for output (if to stdout and

View File

@ -231,6 +231,7 @@ HeapTupleGetDatum(const HeapTupleData *tuple)
{ {
return HeapTupleHeaderGetDatum(tuple->t_data); return HeapTupleHeaderGetDatum(tuple->t_data);
} }
/* obsolete version of above */ /* obsolete version of above */
#define TupleGetDatum(_slot, _tuple) HeapTupleGetDatum(_tuple) #define TupleGetDatum(_slot, _tuple) HeapTupleGetDatum(_tuple)

View File

@ -1479,6 +1479,7 @@ typedef struct SQLValueFunction
{ {
Expr xpr; Expr xpr;
SQLValueFunctionOp op; /* which function this is */ SQLValueFunctionOp op; /* which function this is */
/* /*
* Result type/typmod. Type is fully determined by "op", so no need to * Result type/typmod. Type is fully determined by "op", so no need to
* include this Oid in the query jumbling. * include this Oid in the query jumbling.

View File

@ -28,7 +28,8 @@ typedef enum LWLockWaitState
{ {
LW_WS_NOT_WAITING, /* not currently waiting / woken up */ LW_WS_NOT_WAITING, /* not currently waiting / woken up */
LW_WS_WAITING, /* currently waiting */ LW_WS_WAITING, /* currently waiting */
LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet signalled */ LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet
* signalled */
} LWLockWaitState; } LWLockWaitState;
/* /*

View File

@ -346,8 +346,8 @@ dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm *tm,
break; break;
/* /*
* The preferred date and time representation for * The preferred date and time representation for the
* the current locale. * current locale.
*/ */
case 'c': case 'c':
/* XXX */ /* XXX */

View File

@ -140,7 +140,8 @@ while (<$parser_fh>)
$block = ''; $block = '';
$in_rule = 0 if $arr[$fieldIndexer] eq ';'; $in_rule = 0 if $arr[$fieldIndexer] eq ';';
} }
elsif (($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:') elsif (
($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:')
|| ( $fieldIndexer + 1 < $n || ( $fieldIndexer + 1 < $n
&& $arr[ $fieldIndexer + 1 ] eq ':')) && $arr[ $fieldIndexer + 1 ] eq ':'))
{ {

View File

@ -2103,10 +2103,9 @@ PQgetResult(PGconn *conn)
/* /*
* We're about to return the NULL that terminates the round of * We're about to return the NULL that terminates the round of
* results from the current query; prepare to send the results * results from the current query; prepare to send the results of
* of the next query, if any, when we're called next. If there's * the next query, if any, when we're called next. If there's no
* no next element in the command queue, this gets us in IDLE * next element in the command queue, this gets us in IDLE state.
* state.
*/ */
pqPipelineProcessQueue(conn); pqPipelineProcessQueue(conn);
res = NULL; /* query is complete */ res = NULL; /* query is complete */
@ -3051,6 +3050,7 @@ pqPipelineProcessQueue(PGconn *conn)
return; return;
case PGASYNC_IDLE: case PGASYNC_IDLE:
/* /*
* If we're in IDLE mode and there's some command in the queue, * If we're in IDLE mode and there's some command in the queue,
* get us into PIPELINE_IDLE mode and process normally. Otherwise * get us into PIPELINE_IDLE mode and process normally. Otherwise

View File

@ -1520,8 +1520,8 @@ open_client_SSL(PGconn *conn)
* it means that verification failed due to a missing * it means that verification failed due to a missing
* system CA pool without it being a protocol error. We * system CA pool without it being a protocol error. We
* inspect the sslrootcert setting to ensure that the user * inspect the sslrootcert setting to ensure that the user
* was using the system CA pool. For other errors, log them * was using the system CA pool. For other errors, log
* using the normal SYSCALL logging. * them using the normal SYSCALL logging.
*/ */
if (!save_errno && vcode == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY && if (!save_errno && vcode == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY &&
strcmp(conn->sslrootcert, "system") == 0) strcmp(conn->sslrootcert, "system") == 0)

Some files were not shown because too many files have changed in this diff Show More