pgindent run prior to branching

Andrew Dunstan 2018-06-30 12:25:49 -04:00
parent 2c64d20048
commit 1e9c858090
18 changed files with 62 additions and 62 deletions

View File

@@ -2020,12 +2020,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
/*
* If the foreign table is a partition, we need to create a new RTE
* describing the foreign table for use by deparseInsertSql and
- * create_foreign_modify() below, after first copying the parent's
- * RTE and modifying some fields to describe the foreign partition to
- * work on. However, if this is invoked by UPDATE, the existing RTE
- * may already correspond to this partition if it is one of the
- * UPDATE subplan target rels; in that case, we can just use the
- * existing RTE as-is.
+ * create_foreign_modify() below, after first copying the parent's RTE and
+ * modifying some fields to describe the foreign partition to work on.
+ * However, if this is invoked by UPDATE, the existing RTE may already
+ * correspond to this partition if it is one of the UPDATE subplan target
+ * rels; in that case, we can just use the existing RTE as-is.
*/
rte = list_nth(estate->es_range_table, resultRelation - 1);
if (rte->relid != RelationGetRelid(rel))
@@ -2035,10 +2034,10 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
rte->relkind = RELKIND_FOREIGN_TABLE;
/*
- * For UPDATE, we must use the RT index of the first subplan
- * target rel's RTE, because the core code would have built
- * expressions for the partition, such as RETURNING, using that
- * RT index as varno of Vars contained in those expressions.
+ * For UPDATE, we must use the RT index of the first subplan target
+ * rel's RTE, because the core code would have built expressions for
+ * the partition, such as RETURNING, using that RT index as varno of
+ * Vars contained in those expressions.
*/
if (plan && plan->operation == CMD_UPDATE &&
resultRelation == plan->nominalRelation)

View File

@@ -247,9 +247,9 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
metapage = BufferGetPage(metabuffer);
/*
- * An insertion to the pending list could logically belong anywhere in
- * the tree, so it conflicts with all serializable scans. All scans
- * acquire a predicate lock on the metabuffer to represent that.
+ * An insertion to the pending list could logically belong anywhere in the
+ * tree, so it conflicts with all serializable scans. All scans acquire a
+ * predicate lock on the metabuffer to represent that.
*/
CheckForSerializableConflictIn(index, NULL, metabuffer);

View File

@@ -235,8 +235,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
LockBuffer(stack->buffer, GIN_UNLOCK);
/*
- * Acquire predicate lock on the posting tree. We already hold
- * a lock on the entry page, but insertions to the posting tree
+ * Acquire predicate lock on the posting tree. We already hold a
+ * lock on the entry page, but insertions to the posting tree
* don't check for conflicts on that level.
*/
PredicateLockPage(btree->index, rootPostingTree, snapshot);
@@ -1766,8 +1766,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
*ntids = 0;
/*
- * Acquire predicate lock on the metapage, to conflict with any
- * fastupdate insertions.
+ * Acquire predicate lock on the metapage, to conflict with any fastupdate
+ * insertions.
*/
PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);

View File

@@ -820,10 +820,10 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
/*
* If table receives enough insertions and no cleanup was performed,
- * then index would appear have stale statistics. If scale factor
- * is set, we avoid that by performing cleanup if the number of
- * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
- * of original tuples count.
+ * then index would appear have stale statistics. If scale factor is
+ * set, we avoid that by performing cleanup if the number of inserted
+ * tuples exceeds vacuum_cleanup_index_scale_factor fraction of
+ * original tuples count.
*/
relopts = (StdRdOptions *) info->index->rd_options;
cleanup_scale_factor = (relopts &&
@@ -873,8 +873,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
&oldestBtpoXact);
/*
- * Update cleanup-related information in metapage. This information
- * is used only for cleanup but keeping them up to date can avoid
+ * Update cleanup-related information in metapage. This information is
+ * used only for cleanup but keeping them up to date can avoid
* unnecessary cleanup even after bulkdelete.
*/
_bt_update_meta_cleanup_info(info->index, oldestBtpoXact,

View File

@@ -2196,8 +2196,8 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
* non-zero, or when there is no explicit representation and the
* tuple is evidently not a pre-pg_upgrade tuple.
*
- * Prior to v11, downlinks always had P_HIKEY as their offset.
- * Use that to decide if the tuple is a pre-v11 tuple.
+ * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+ * that to decide if the tuple is a pre-v11 tuple.
*/
return BTreeTupleGetNAtts(itup, rel) == 0 ||
((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&

View File

@@ -4512,7 +4512,7 @@ ReadControlFile(void)
errmsg("could not read from control file: %m")));
else
ereport(PANIC,
- (errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
+ (errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
}
pgstat_report_wait_end();

View File

@@ -829,9 +829,9 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
}
/*
- * Check that the address on the page agrees with what we expected.
- * This check typically fails when an old WAL segment is recycled,
- * and hasn't yet been overwritten with new data yet.
+ * Check that the address on the page agrees with what we expected. This
+ * check typically fails when an old WAL segment is recycled, and hasn't
+ * yet been overwritten with new data yet.
*/
if (hdr->xlp_pageaddr != recaddr)
{

View File

@@ -1002,7 +1002,7 @@ DefineIndex(Oid relationId,
*/
foreach(lc, childStmt->indexParams)
{
- IndexElem *ielem = lfirst(lc);
+ IndexElem *ielem = lfirst(lc);
/*
* If the index parameter is an expression, we must

View File

@@ -1865,7 +1865,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
EState *estate, bool emitError)
{
ExprContext *econtext;
- bool success;
+ bool success;
/*
* If first time through, build expression state tree for the partition

View File

@@ -754,9 +754,9 @@ ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid,
else
{
/*
- * We already saw this transaction, but initially added it to the list
- * of top-level txns. Now that we know it's not top-level, remove
- * it from there.
+ * We already saw this transaction, but initially added it to the
+ * list of top-level txns. Now that we know it's not top-level,
+ * remove it from there.
*/
dlist_delete(&subtxn->node);
}

View File

@@ -2014,8 +2014,8 @@ GetRunningTransactionData(void)
/*
* If we wished to exclude xids this would be the right place for it.
* Procs with the PROC_IN_VACUUM flag set don't usually assign xids,
- * but they do during truncation at the end when they get the lock
- * and truncate, so it is not much of a problem to include them if they
+ * but they do during truncation at the end when they get the lock and
+ * truncate, so it is not much of a problem to include them if they
* are seen and it is cleaner to include them.
*/

View File

@@ -53,8 +53,8 @@ static void LogAccessExclusiveLocks(int nlocks, xl_standby_lock *locks);
*/
typedef struct RecoveryLockListsEntry
{
- TransactionId xid;
- List *locks;
+ TransactionId xid;
+ List *locks;
} RecoveryLockListsEntry;
/*
@@ -73,7 +73,7 @@ void
InitRecoveryTransactionEnvironment(void)
{
VirtualTransactionId vxid;
- HASHCTL hash_ctl;
+ HASHCTL hash_ctl;
/*
* Initialize the hash table for tracking the list of locks held by each
@@ -671,6 +671,7 @@ StandbyReleaseLockList(List *locks)
{
xl_standby_lock *lock = (xl_standby_lock *) linitial(locks);
LOCKTAG locktag;
+
elog(trace_recovery(DEBUG4),
"releasing recovery lock: xid %u db %u rel %u",
lock->xid, lock->dbOid, lock->relOid);
@@ -728,7 +729,7 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
void
StandbyReleaseAllLocks(void)
{
- HASH_SEQ_STATUS status;
+ HASH_SEQ_STATUS status;
RecoveryLockListsEntry *entry;
elog(trace_recovery(DEBUG2), "release all standby locks");
@@ -749,7 +750,7 @@ StandbyReleaseAllLocks(void)
void
StandbyReleaseOldLocks(TransactionId oldxid)
{
- HASH_SEQ_STATUS status;
+ HASH_SEQ_STATUS status;
RecoveryLockListsEntry *entry;
hash_seq_init(&status, RecoveryLockLists);

View File

@@ -1316,8 +1316,8 @@ ProcessUtilitySlow(ParseState *pstate,
* acquire locks early to avoid deadlocks.
*
* We also take the opportunity to verify that all
- * partitions are something we can put an index on,
- * to avoid building some indexes only to fail later.
+ * partitions are something we can put an index on, to
+ * avoid building some indexes only to fail later.
*/
if (stmt->relation->inh &&
get_rel_relkind(relid) == RELKIND_PARTITIONED_TABLE)
@@ -1328,7 +1328,7 @@ ProcessUtilitySlow(ParseState *pstate,
inheritors = find_all_inheritors(relid, lockmode, NULL);
foreach(lc, inheritors)
{
- char relkind = get_rel_relkind(lfirst_oid(lc));
+ char relkind = get_rel_relkind(lfirst_oid(lc));
if (relkind != RELKIND_RELATION &&
relkind != RELKIND_MATVIEW &&

View File

@@ -1902,29 +1902,29 @@ cannotCastJsonbValue(enum jbvType type, const char *sqltype)
{
static const struct
{
- enum jbvType type;
- const char *msg;
+ enum jbvType type;
+ const char *msg;
}
- messages[] =
+ messages[] =
{
- { jbvNull, gettext_noop("cannot cast jsonb null to type %s") },
- { jbvString, gettext_noop("cannot cast jsonb string to type %s") },
- { jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s") },
- { jbvBool, gettext_noop("cannot cast jsonb boolean to type %s") },
- { jbvArray, gettext_noop("cannot cast jsonb array to type %s") },
- { jbvObject, gettext_noop("cannot cast jsonb object to type %s") },
- { jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s") }
+ {jbvNull, gettext_noop("cannot cast jsonb null to type %s")},
+ {jbvString, gettext_noop("cannot cast jsonb string to type %s")},
+ {jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s")},
+ {jbvBool, gettext_noop("cannot cast jsonb boolean to type %s")},
+ {jbvArray, gettext_noop("cannot cast jsonb array to type %s")},
+ {jbvObject, gettext_noop("cannot cast jsonb object to type %s")},
+ {jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s")}
};
- int i;
+ int i;
- for(i=0; i<lengthof(messages); i++)
+ for (i = 0; i < lengthof(messages); i++)
if (messages[i].type == type)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg(messages[i].msg, sqltype)));
/* should be unreachable */
- elog(ERROR, "unknown jsonb type: %d", (int)type);
+ elog(ERROR, "unknown jsonb type: %d", (int) type);
}
Datum

View File

@@ -316,7 +316,7 @@ typedef struct _tableInfo
char **attoptions; /* per-attribute options */
Oid *attcollation; /* per-attribute collation selection */
char **attfdwoptions; /* per-attribute fdw options */
- char **attmissingval; /* per attribute missing value */
+ char **attmissingval; /* per attribute missing value */
bool *notnull; /* NOT NULL constraints on attributes */
bool *inhNotNull; /* true if NOT NULL is inherited */
struct _attrDefInfo **attrdefs; /* DEFAULT expressions */

View File

@@ -1856,14 +1856,14 @@ psql_completion(const char *text, int start, int end)
/* ALTER INDEX <foo> SET|RESET ( */
else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
COMPLETE_WITH_LIST8("fillfactor", "recheck_on_update",
- "vacuum_cleanup_index_scale_factor", /* BTREE */
+ "vacuum_cleanup_index_scale_factor", /* BTREE */
"fastupdate", "gin_pending_list_limit", /* GIN */
"buffering", /* GiST */
"pages_per_range", "autosummarize" /* BRIN */
);
else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
COMPLETE_WITH_LIST8("fillfactor =", "recheck_on_update =",
- "vacuum_cleanup_index_scale_factor =", /* BTREE */
+ "vacuum_cleanup_index_scale_factor =", /* BTREE */
"fastupdate =", "gin_pending_list_limit =", /* GIN */
"buffering =", /* GiST */
"pages_per_range =", "autosummarize =" /* BRIN */

View File

@@ -207,7 +207,7 @@ extern struct XLogRecord *XLogReadRecord(XLogReaderState *state,
/* Validate a page */
extern bool XLogReaderValidatePageHeader(XLogReaderState *state,
- XLogRecPtr recptr, char *phdr);
+ XLogRecPtr recptr, char *phdr);
/* Invalidate read state */
extern void XLogReaderInvalReadState(XLogReaderState *state);

View File

@@ -214,7 +214,7 @@ typedef struct ReorderBufferTXN
*/
Snapshot base_snapshot;
XLogRecPtr base_snapshot_lsn;
- dlist_node base_snapshot_node; /* link in txns_by_base_snapshot_lsn */
+ dlist_node base_snapshot_node; /* link in txns_by_base_snapshot_lsn */
/*
* How many ReorderBufferChange's do we have in this txn.