diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 78b0f43ca8..9369cfc265 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -2020,12 +2020,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
 	/*
 	 * If the foreign table is a partition, we need to create a new RTE
 	 * describing the foreign table for use by deparseInsertSql and
-	 * create_foreign_modify() below, after first copying the parent's
-	 * RTE and modifying some fields to describe the foreign partition to
-	 * work on. However, if this is invoked by UPDATE, the existing RTE
-	 * may already correspond to this partition if it is one of the
-	 * UPDATE subplan target rels; in that case, we can just use the
-	 * existing RTE as-is.
+	 * create_foreign_modify() below, after first copying the parent's RTE and
+	 * modifying some fields to describe the foreign partition to work on.
+	 * However, if this is invoked by UPDATE, the existing RTE may already
+	 * correspond to this partition if it is one of the UPDATE subplan target
+	 * rels; in that case, we can just use the existing RTE as-is.
 	 */
 	rte = list_nth(estate->es_range_table, resultRelation - 1);
 	if (rte->relid != RelationGetRelid(rel))
@@ -2035,10 +2034,10 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
 		rte->relkind = RELKIND_FOREIGN_TABLE;
 
 		/*
-		 * For UPDATE, we must use the RT index of the first subplan
-		 * target rel's RTE, because the core code would have built
-		 * expressions for the partition, such as RETURNING, using that
-		 * RT index as varno of Vars contained in those expressions.
+		 * For UPDATE, we must use the RT index of the first subplan target
+		 * rel's RTE, because the core code would have built expressions for
+		 * the partition, such as RETURNING, using that RT index as varno of
+		 * Vars contained in those expressions.
 		 */
 		if (plan && plan->operation == CMD_UPDATE &&
 			resultRelation == plan->nominalRelation)
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 5f624cf6fa..e32807e62a 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -247,9 +247,9 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metapage = BufferGetPage(metabuffer);
 
 	/*
-	 * An insertion to the pending list could logically belong anywhere in
-	 * the tree, so it conflicts with all serializable scans. All scans
-	 * acquire a predicate lock on the metabuffer to represent that.
+	 * An insertion to the pending list could logically belong anywhere in the
+	 * tree, so it conflicts with all serializable scans. All scans acquire a
+	 * predicate lock on the metabuffer to represent that.
 	 */
 	CheckForSerializableConflictIn(index, NULL, metabuffer);
 
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index ef3cd7dbe2..8466d947ea 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -235,8 +235,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 
 			/*
-			 * Acquire predicate lock on the posting tree. We already hold
-			 * a lock on the entry page, but insertions to the posting tree
+			 * Acquire predicate lock on the posting tree. We already hold a
+			 * lock on the entry page, but insertions to the posting tree
 			 * don't check for conflicts on that level.
 			 */
 			PredicateLockPage(btree->index, rootPostingTree, snapshot);
@@ -1766,8 +1766,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	*ntids = 0;
 
 	/*
-	 * Acquire predicate lock on the metapage, to conflict with any
-	 * fastupdate insertions.
+	 * Acquire predicate lock on the metapage, to conflict with any fastupdate
+	 * insertions.
 	 */
 	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO,
 					  scan->xs_snapshot);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index cdd0403e1d..e8725fbbe1 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -820,10 +820,10 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 
 		/*
 		 * If table receives enough insertions and no cleanup was performed,
-		 * then index would appear have stale statistics. If scale factor
-		 * is set, we avoid that by performing cleanup if the number of
-		 * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
-		 * of original tuples count.
+		 * then index would appear have stale statistics. If scale factor is
+		 * set, we avoid that by performing cleanup if the number of inserted
+		 * tuples exceeds vacuum_cleanup_index_scale_factor fraction of
+		 * original tuples count.
 		 */
 		relopts = (StdRdOptions *) info->index->rd_options;
 		cleanup_scale_factor = (relopts &&
@@ -873,8 +873,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 				 &oldestBtpoXact);
 
 	/*
-	 * Update cleanup-related information in metapage. This information
-	 * is used only for cleanup but keeping them up to date can avoid
+	 * Update cleanup-related information in metapage. This information is
+	 * used only for cleanup but keeping them up to date can avoid
 	 * unnecessary cleanup even after bulkdelete.
 	 */
 	_bt_update_meta_cleanup_info(info->index, oldestBtpoXact,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index acb944357a..4528e87c83 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -2196,8 +2196,8 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
 		 * non-zero, or when there is no explicit representation and the
 		 * tuple is evidently not a pre-pg_upgrade tuple.
 		 *
-		 * Prior to v11, downlinks always had P_HIKEY as their offset.
-		 * Use that to decide if the tuple is a pre-v11 tuple.
+		 * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+		 * that to decide if the tuple is a pre-v11 tuple.
 		 */
 		return BTreeTupleGetNAtts(itup, rel) == 0 ||
 			((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 1a419aa49b..dcfef36591 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -4512,7 +4512,7 @@ ReadControlFile(void)
 					 errmsg("could not read from control file: %m")));
 		else
 			ereport(PANIC,
-				(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
+					(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
 	}
 	pgstat_report_wait_end();
 
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 1b000a2ef1..dd96cef8f0 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -829,9 +829,9 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
 	}
 
 	/*
-	 * Check that the address on the page agrees with what we expected.
-	 * This check typically fails when an old WAL segment is recycled,
-	 * and hasn't yet been overwritten with new data yet.
+	 * Check that the address on the page agrees with what we expected. This
+	 * check typically fails when an old WAL segment is recycled, and hasn't
+	 * yet been overwritten with new data yet.
 	 */
 	if (hdr->xlp_pageaddr != recaddr)
 	{
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 576c85f732..0053832195 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -1002,7 +1002,7 @@ DefineIndex(Oid relationId,
 				 */
 				foreach(lc, childStmt->indexParams)
 				{
-					IndexElem *ielem = lfirst(lc);
+					IndexElem  *ielem = lfirst(lc);
 
 					/*
 					 * If the index parameter is an expression, we must
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 969944cc12..8026fe2438 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1865,7 +1865,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 				   EState *estate, bool emitError)
 {
 	ExprContext *econtext;
-	bool success;
+	bool		success;
 
 	/*
 	 * If first time through, build expression state tree for the partition
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 5f4aa07131..5792cd14a0 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -754,9 +754,9 @@ ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid,
 	else
 	{
 		/*
-		 * We already saw this transaction, but initially added it to the list
-		 * of top-level txns. Now that we know it's not top-level, remove
-		 * it from there.
+		 * We already saw this transaction, but initially added it to the
+		 * list of top-level txns. Now that we know it's not top-level,
+		 * remove it from there.
 		 */
 		dlist_delete(&subtxn->node);
 	}
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 7f293d989b..bd20497d81 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -2014,8 +2014,8 @@ GetRunningTransactionData(void)
 	/*
 	 * If we wished to exclude xids this would be the right place for it.
 	 * Procs with the PROC_IN_VACUUM flag set don't usually assign xids,
-	 * but they do during truncation at the end when they get the lock
-	 * and truncate, so it is not much of a problem to include them if they
+	 * but they do during truncation at the end when they get the lock and
+	 * truncate, so it is not much of a problem to include them if they
 	 * are seen and it is cleaner to include them.
 	 */
 
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 147784c4b6..2e07702895 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -53,8 +53,8 @@ static void LogAccessExclusiveLocks(int nlocks, xl_standby_lock *locks);
  */
 typedef struct RecoveryLockListsEntry
 {
-	TransactionId	xid;
-	List	*locks;
+	TransactionId xid;
+	List	   *locks;
 } RecoveryLockListsEntry;
 
 /*
@@ -73,7 +73,7 @@ void
 InitRecoveryTransactionEnvironment(void)
 {
 	VirtualTransactionId vxid;
-	HASHCTL hash_ctl;
+	HASHCTL		hash_ctl;
 
 	/*
 	 * Initialize the hash table for tracking the list of locks held by each
@@ -671,6 +671,7 @@ StandbyReleaseLockList(List *locks)
 	{
 		xl_standby_lock *lock = (xl_standby_lock *) linitial(locks);
 		LOCKTAG		locktag;
+
 		elog(trace_recovery(DEBUG4),
 			 "releasing recovery lock: xid %u db %u rel %u",
 			 lock->xid, lock->dbOid, lock->relOid);
@@ -728,7 +729,7 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 void
 StandbyReleaseAllLocks(void)
 {
-	HASH_SEQ_STATUS	status;
+	HASH_SEQ_STATUS status;
 	RecoveryLockListsEntry *entry;
 
 	elog(trace_recovery(DEBUG2), "release all standby locks");
@@ -749,7 +750,7 @@ StandbyReleaseAllLocks(void)
 void
 StandbyReleaseOldLocks(TransactionId oldxid)
 {
-	HASH_SEQ_STATUS	status;
+	HASH_SEQ_STATUS status;
 	RecoveryLockListsEntry *entry;
 
 	hash_seq_init(&status, RecoveryLockLists);
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index bdfb66fa74..4e1c21298e 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1316,8 +1316,8 @@ ProcessUtilitySlow(ParseState *pstate,
 					 * acquire locks early to avoid deadlocks.
 					 *
 					 * We also take the opportunity to verify that all
-					 * partitions are something we can put an index on,
-					 * to avoid building some indexes only to fail later.
+					 * partitions are something we can put an index on, to
+					 * avoid building some indexes only to fail later.
 					 */
 					if (stmt->relation->inh &&
 						get_rel_relkind(relid) == RELKIND_PARTITIONED_TABLE)
@@ -1328,7 +1328,7 @@ ProcessUtilitySlow(ParseState *pstate,
 						inheritors = find_all_inheritors(relid, lockmode, NULL);
 						foreach(lc, inheritors)
 						{
-							char relkind = get_rel_relkind(lfirst_oid(lc));
+							char		relkind = get_rel_relkind(lfirst_oid(lc));
 
 							if (relkind != RELKIND_RELATION &&
 								relkind != RELKIND_MATVIEW &&
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 6940b11c29..0ae9d7b9c5 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -1902,29 +1902,29 @@ cannotCastJsonbValue(enum jbvType type, const char *sqltype)
 {
 	static const struct
 	{
-		enum jbvType	type;
-		const char	*msg;
+		enum jbvType type;
+		const char *msg;
 	}
-	messages[] =
+				messages[] =
 	{
-		{ jbvNull, gettext_noop("cannot cast jsonb null to type %s") },
-		{ jbvString, gettext_noop("cannot cast jsonb string to type %s") },
-		{ jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s") },
-		{ jbvBool, gettext_noop("cannot cast jsonb boolean to type %s") },
-		{ jbvArray, gettext_noop("cannot cast jsonb array to type %s") },
-		{ jbvObject, gettext_noop("cannot cast jsonb object to type %s") },
-		{ jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s") }
+		{jbvNull, gettext_noop("cannot cast jsonb null to type %s")},
+		{jbvString, gettext_noop("cannot cast jsonb string to type %s")},
+		{jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s")},
+		{jbvBool, gettext_noop("cannot cast jsonb boolean to type %s")},
+		{jbvArray, gettext_noop("cannot cast jsonb array to type %s")},
+		{jbvObject, gettext_noop("cannot cast jsonb object to type %s")},
+		{jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s")}
 	};
-	int i;
-	for(i=0; i
[...]
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
 	/* ALTER INDEX <name> SET|RESET ( */
 	else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
 		COMPLETE_WITH_LIST8("fillfactor", "recheck_on_update",
-							"vacuum_cleanup_index_scale_factor", /* BTREE */
+							"vacuum_cleanup_index_scale_factor",	/* BTREE */
 							"fastupdate", "gin_pending_list_limit", /* GIN */
 							"buffering",	/* GiST */
 							"pages_per_range", "autosummarize"	/* BRIN */
 			);
 	else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
 		COMPLETE_WITH_LIST8("fillfactor =", "recheck_on_update =",
-							"vacuum_cleanup_index_scale_factor =", /* BTREE */
+							"vacuum_cleanup_index_scale_factor =",	/* BTREE */
 							"fastupdate =", "gin_pending_list_limit =", /* GIN */
 							"buffering =",	/* GiST */
 							"pages_per_range =", "autosummarize =" /* BRIN */
diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h
index f307b6318d..40116f8ecb 100644
--- a/src/include/access/xlogreader.h
+++ b/src/include/access/xlogreader.h
@@ -207,7 +207,7 @@ extern struct XLogRecord *XLogReadRecord(XLogReaderState *state,
 
 /* Validate a page */
 extern bool XLogReaderValidatePageHeader(XLogReaderState *state,
-						XLogRecPtr recptr, char *phdr);
+							 XLogRecPtr recptr, char *phdr);
 
 /* Invalidate read state */
 extern void XLogReaderInvalReadState(XLogReaderState *state);
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index f8a295bddc..1f52f6bde7 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -214,7 +214,7 @@ typedef struct ReorderBufferTXN
 	 */
 	Snapshot	base_snapshot;
 	XLogRecPtr	base_snapshot_lsn;
-	dlist_node	base_snapshot_node; /* link in txns_by_base_snapshot_lsn */
+	dlist_node	base_snapshot_node;	/* link in txns_by_base_snapshot_lsn */
 
 	/*
 	 * How many ReorderBufferChange's do we have in this txn.