pgindent run prior to branching

Author: Andrew Dunstan
Date:   2018-06-30 12:25:49 -04:00
Parent: 2c64d20048
Commit: 1e9c858090
18 changed files with 62 additions and 62 deletions

contrib/postgres_fdw/postgres_fdw.c

@@ -2020,12 +2020,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
     /*
      * If the foreign table is a partition, we need to create a new RTE
      * describing the foreign table for use by deparseInsertSql and
-     * create_foreign_modify() below, after first copying the parent's
-     * RTE and modifying some fields to describe the foreign partition to
-     * work on. However, if this is invoked by UPDATE, the existing RTE
-     * may already correspond to this partition if it is one of the
-     * UPDATE subplan target rels; in that case, we can just use the
-     * existing RTE as-is.
+     * create_foreign_modify() below, after first copying the parent's RTE and
+     * modifying some fields to describe the foreign partition to work on.
+     * However, if this is invoked by UPDATE, the existing RTE may already
+     * correspond to this partition if it is one of the UPDATE subplan target
+     * rels; in that case, we can just use the existing RTE as-is.
      */
     rte = list_nth(estate->es_range_table, resultRelation - 1);
     if (rte->relid != RelationGetRelid(rel))
@@ -2035,10 +2034,10 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
         rte->relkind = RELKIND_FOREIGN_TABLE;
 
         /*
-         * For UPDATE, we must use the RT index of the first subplan
-         * target rel's RTE, because the core code would have built
-         * expressions for the partition, such as RETURNING, using that
-         * RT index as varno of Vars contained in those expressions.
+         * For UPDATE, we must use the RT index of the first subplan target
+         * rel's RTE, because the core code would have built expressions for
+         * the partition, such as RETURNING, using that RT index as varno of
+         * Vars contained in those expressions.
          */
         if (plan && plan->operation == CMD_UPDATE &&
             resultRelation == plan->nominalRelation)
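
Editor's note: the two comments above describe a copy-and-patch pattern. A rough standalone illustration (hypothetical ToyRTE struct, not the postgres_fdw code) of cloning the parent's range-table entry and overriding only the fields that identify the target relation:

    typedef struct ToyRTE
    {
        unsigned    relid;      /* relation this entry describes */
        char        relkind;    /* 'f' would mean a foreign table */
        /* ... all other fields are inherited from the parent unchanged ... */
    } ToyRTE;

    /* Clone the parent's entry, then point it at the foreign partition. */
    static ToyRTE
    make_partition_rte(const ToyRTE *parent, unsigned partition_relid)
    {
        ToyRTE      rte = *parent;  /* struct copy keeps the shared fields */

        rte.relid = partition_relid;
        rte.relkind = 'f';
        return rte;
    }

For UPDATE, per the second comment, the caller would additionally keep the RT index of the first subplan target rel, because expressions such as RETURNING were already built with that index as the varno of their Vars.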

src/backend/access/gin/ginfast.c

@@ -247,9 +247,9 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
     metapage = BufferGetPage(metabuffer);
 
     /*
-     * An insertion to the pending list could logically belong anywhere in
-     * the tree, so it conflicts with all serializable scans. All scans
-     * acquire a predicate lock on the metabuffer to represent that.
+     * An insertion to the pending list could logically belong anywhere in the
+     * tree, so it conflicts with all serializable scans. All scans acquire a
+     * predicate lock on the metabuffer to represent that.
      */
     CheckForSerializableConflictIn(index, NULL, metabuffer);
 

src/backend/access/gin/ginget.c

@@ -235,8 +235,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
             LockBuffer(stack->buffer, GIN_UNLOCK);
 
             /*
-             * Acquire predicate lock on the posting tree. We already hold
-             * a lock on the entry page, but insertions to the posting tree
+             * Acquire predicate lock on the posting tree. We already hold a
+             * lock on the entry page, but insertions to the posting tree
              * don't check for conflicts on that level.
              */
             PredicateLockPage(btree->index, rootPostingTree, snapshot);
@@ -1766,8 +1766,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
     *ntids = 0;
 
     /*
-     * Acquire predicate lock on the metapage, to conflict with any
-     * fastupdate insertions.
+     * Acquire predicate lock on the metapage, to conflict with any fastupdate
+     * insertions.
      */
     PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
 
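
Editor's note: the GIN hunks above (here and in ginfast.c) describe one protocol: a pending-list insertion could affect any key range, so rather than locking every page it might touch, both sides agree on a single proxy object. A compilable toy of that idea (hypothetical names, not the GIN code):

    #include <stdbool.h>

    #define METAPAGE_BLKNO 0        /* the one well-known proxy block */
    #define MAX_LOCKED_BLOCKS 32

    typedef struct ToyScan
    {
        int         locked[MAX_LOCKED_BLOCKS];
        int         nlocked;
    } ToyScan;

    /* Every serializable scan locks the proxy block up front. */
    static void
    toy_predicate_lock_metapage(ToyScan *scan)
    {
        if (scan->nlocked < MAX_LOCKED_BLOCKS)
            scan->locked[scan->nlocked++] = METAPAGE_BLKNO;
    }

    /* A pending-list insert checks only the proxy block, so it conflicts
     * with every scan that called toy_predicate_lock_metapage(). */
    static bool
    toy_insert_conflicts_with(const ToyScan *scan)
    {
        for (int i = 0; i < scan->nlocked; i++)
            if (scan->locked[i] == METAPAGE_BLKNO)
                return true;
        return false;
    }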

src/backend/access/nbtree/nbtree.c

@@ -820,10 +820,10 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 
         /*
          * If table receives enough insertions and no cleanup was performed,
-         * then index would appear have stale statistics. If scale factor
-         * is set, we avoid that by performing cleanup if the number of
-         * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
-         * of original tuples count.
+         * then index would appear have stale statistics. If scale factor is
+         * set, we avoid that by performing cleanup if the number of inserted
+         * tuples exceeds vacuum_cleanup_index_scale_factor fraction of
+         * original tuples count.
          */
         relopts = (StdRdOptions *) info->index->rd_options;
         cleanup_scale_factor = (relopts &&
@@ -873,8 +873,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
                              &oldestBtpoXact);
 
     /*
-     * Update cleanup-related information in metapage. This information
-     * is used only for cleanup but keeping them up to date can avoid
+     * Update cleanup-related information in metapage. This information is
+     * used only for cleanup but keeping them up to date can avoid
      * unnecessary cleanup even after bulkdelete.
      */
     _bt_update_meta_cleanup_info(info->index, oldestBtpoXact,
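
Editor's note: the rule in the _bt_vacuum_needs_cleanup comment reduces to one comparison. A minimal standalone sketch (assumed parameter names, not the nbtree code):

    #include <stdbool.h>

    /*
     * Cleanup is warranted once the tuples inserted since the last cleanup
     * exceed the configured fraction of the tuple count recorded back then.
     * E.g. with cleanup_scale_factor = 0.1 and 1,000,000 tuples at the last
     * cleanup, the 100,001st insertion tips the scale.
     */
    static bool
    toy_vacuum_needs_cleanup(double inserted_since_cleanup,
                             double tuples_at_last_cleanup,
                             double cleanup_scale_factor)
    {
        return inserted_since_cleanup >
            cleanup_scale_factor * tuples_at_last_cleanup;
    }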

src/backend/access/nbtree/nbtutils.c

@@ -2196,8 +2196,8 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
          * non-zero, or when there is no explicit representation and the
          * tuple is evidently not a pre-pg_upgrade tuple.
          *
-         * Prior to v11, downlinks always had P_HIKEY as their offset.
-         * Use that to decide if the tuple is a pre-v11 tuple.
+         * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+         * that to decide if the tuple is a pre-v11 tuple.
          */
         return BTreeTupleGetNAtts(itup, rel) == 0 ||
             ((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
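
Editor's note: the heuristic in this hunk can be shown in miniature. In the sketch below the constants mirror the comment (P_HIKEY, an alternative-TID flag bit), but the tuple layout is simplified and hypothetical, not the real on-disk format:

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_ALT_TID_MASK 0x2000 /* stands in for INDEX_ALT_TID_MASK */
    #define TOY_P_HIKEY      1      /* stands in for P_HIKEY */

    typedef struct ToyIndexTuple
    {
        uint16_t    t_info;         /* flag bits */
        uint16_t    tid_offset;     /* offset number stored in the TID */
    } ToyIndexTuple;

    /*
     * Pre-v11 tuples never set the alternative-TID flag, and their downlinks
     * always stored P_HIKEY as the TID offset; that combination is the tell.
     */
    static bool
    toy_is_pre_v11_downlink(const ToyIndexTuple *itup)
    {
        return (itup->t_info & TOY_ALT_TID_MASK) == 0 &&
            itup->tid_offset == TOY_P_HIKEY;
    }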

src/backend/access/transam/xlogreader.c

@@ -829,9 +829,9 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
     }
 
     /*
-     * Check that the address on the page agrees with what we expected.
-     * This check typically fails when an old WAL segment is recycled,
-     * and hasn't yet been overwritten with new data yet.
+     * Check that the address on the page agrees with what we expected. This
+     * check typically fails when an old WAL segment is recycled, and hasn't
+     * yet been overwritten with new data yet.
      */
     if (hdr->xlp_pageaddr != recaddr)
     {
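
Editor's note: the check itself is a plain field comparison; what matters is the interpretation of a mismatch. A simplified sketch (toy types, not the xlogreader code):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t ToyRecPtr;     /* a WAL location */

    typedef struct ToyPageHeader
    {
        ToyRecPtr   xlp_pageaddr;   /* address the page was written for */
    } ToyPageHeader;

    /*
     * A recycled segment still contains pages stamped with their old
     * addresses, so a mismatch usually means "end of valid WAL reached",
     * not data corruption.
     */
    static bool
    toy_page_addr_ok(const ToyPageHeader *hdr, ToyRecPtr expected)
    {
        return hdr->xlp_pageaddr == expected;
    }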

src/backend/replication/logical/reorderbuffer.c

@@ -754,9 +754,9 @@ ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid,
     else
     {
         /*
-         * We already saw this transaction, but initially added it to the list
-         * of top-level txns. Now that we know it's not top-level, remove
-         * it from there.
+         * We already saw this transaction, but initially added it to the
+         * list of top-level txns. Now that we know it's not top-level,
+         * remove it from there.
          */
         dlist_delete(&subtxn->node);
     }
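
Editor's note: the fix-up this comment describes is the classic intrusive-list move, unlinking a node from the list it was filed under and refiling it. A self-contained analogue (not PostgreSQL's dlist implementation):

    typedef struct ToyNode
    {
        struct ToyNode *prev;
        struct ToyNode *next;
    } ToyNode;

    /* Unlink a node from whatever circular list currently holds it. */
    static void
    toy_list_delete(ToyNode *node)
    {
        node->prev->next = node->next;
        node->next->prev = node->prev;
    }

    /* Link a node in right after the list head. */
    static void
    toy_list_push(ToyNode *head, ToyNode *node)
    {
        node->next = head->next;
        node->prev = head;
        head->next->prev = node;
        head->next = node;
    }

    /*
     * On learning a transaction is really a subtransaction:
     *     toy_list_delete(&txn->node);                  -- off the top-level list
     *     toy_list_push(&parent->subtxns, &txn->node);  -- file under its parent
     */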

src/backend/storage/ipc/procarray.c

@@ -2014,8 +2014,8 @@ GetRunningTransactionData(void)
     /*
      * If we wished to exclude xids this would be the right place for it.
      * Procs with the PROC_IN_VACUUM flag set don't usually assign xids,
-     * but they do during truncation at the end when they get the lock
-     * and truncate, so it is not much of a problem to include them if they
+     * but they do during truncation at the end when they get the lock and
+     * truncate, so it is not much of a problem to include them if they
      * are seen and it is cleaner to include them.
      */
 

src/backend/storage/ipc/standby.c

@@ -671,6 +671,7 @@ StandbyReleaseLockList(List *locks)
     {
         xl_standby_lock *lock = (xl_standby_lock *) linitial(locks);
         LOCKTAG     locktag;
+
         elog(trace_recovery(DEBUG4),
              "releasing recovery lock: xid %u db %u rel %u",
              lock->xid, lock->dbOid, lock->relOid);

src/backend/tcop/utility.c

@@ -1316,8 +1316,8 @@ ProcessUtilitySlow(ParseState *pstate,
                  * acquire locks early to avoid deadlocks.
                  *
                  * We also take the opportunity to verify that all
-                 * partitions are something we can put an index on,
-                 * to avoid building some indexes only to fail later.
+                 * partitions are something we can put an index on, to
+                 * avoid building some indexes only to fail later.
                  */
                 if (stmt->relation->inh &&
                     get_rel_relkind(relid) == RELKIND_PARTITIONED_TABLE)
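
Editor's note: the fail-fast idea in this last hunk generalizes well: validate every partition before creating anything, so an error cannot strand half-built indexes. A generic sketch (hypothetical types, not the ProcessUtilitySlow code):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct ToyPartition
    {
        bool        can_have_index; /* e.g. not a foreign table */
    } ToyPartition;

    /* Check all partitions up front; only then is any index built. */
    static bool
    toy_all_partitions_indexable(const ToyPartition *parts, size_t n)
    {
        for (size_t i = 0; i < n; i++)
        {
            if (!parts[i].can_have_index)
                return false;       /* report before doing any work */
        }
        return true;
    }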