From bdf46af748d0f15f257c99bf06e9e25aba6a24f9 Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Thu, 26 Apr 2018 14:47:16 -0400
Subject: [PATCH] Post-feature-freeze pgindent run.

Discussion: https://postgr.es/m/15719.1523984266@sss.pgh.pa.us
---
 contrib/amcheck/verify_nbtree.c | 164 +++---
 contrib/btree_gin/btree_gin.c | 11 +-
 contrib/cube/cube.c | 14 +-
 contrib/jsonb_plperl/jsonb_plperl.c | 4 +-
 contrib/jsonb_plpython/jsonb_plpython.c | 11 +-
 contrib/pg_trgm/trgm_gist.c | 6 +-
 contrib/pg_trgm/trgm_op.c | 32 +-
 contrib/postgres_fdw/postgres_fdw.c | 4 +-
 contrib/tcn/tcn.c | 2 +-
 contrib/test_decoding/test_decoding.c | 6 +-
 src/backend/access/brin/brin.c | 2 +-
 src/backend/access/common/heaptuple.c | 4 +-
 src/backend/access/common/indextuple.c | 4 +-
 src/backend/access/common/reloptions.c | 8 +-
 src/backend/access/gin/ginbtree.c | 12 +-
 src/backend/access/gin/gindatapage.c | 4 +-
 src/backend/access/gin/ginget.c | 16 +-
 src/backend/access/gin/gininsert.c | 10 +-
 src/backend/access/gist/gist.c | 8 +-
 src/backend/access/heap/heapam.c | 42 +-
 src/backend/access/nbtree/nbtinsert.c | 55 +--
 src/backend/access/nbtree/nbtpage.c | 8 +-
 src/backend/access/nbtree/nbtree.c | 22 +-
 src/backend/access/nbtree/nbtsort.c | 20 +-
 src/backend/access/nbtree/nbtutils.c | 37 +-
 src/backend/access/spgist/spgdoinsert.c | 11 +-
 src/backend/access/spgist/spgvalidate.c | 8 +-
 src/backend/access/transam/twophase.c | 6 +-
 src/backend/access/transam/xact.c | 10 +-
 src/backend/access/transam/xlog.c | 39 +-
 src/backend/catalog/aclchk.c | 17 +-
 src/backend/catalog/dependency.c | 6 +-
 src/backend/catalog/index.c | 52 +--
 src/backend/catalog/objectaddress.c | 10 +-
 src/backend/catalog/partition.c | 2 +-
 src/backend/catalog/pg_constraint.c | 34 +-
 src/backend/catalog/pg_inherits.c | 2 +-
 src/backend/commands/alter.c | 2 +-
 src/backend/commands/cluster.c | 4 +-
 src/backend/commands/copy.c | 2 +-
 src/backend/commands/event_trigger.c | 6 +-
 src/backend/commands/functioncmds.c | 2 +-
 src/backend/commands/indexcmds.c | 75 ++--
 src/backend/commands/lockcmds.c | 2 +-
 src/backend/commands/policy.c | 2 +-
 src/backend/commands/portalcmds.c | 6 +-
 src/backend/commands/statscmds.c | 7 +-
 src/backend/commands/tablecmds.c | 83 ++--
 src/backend/commands/trigger.c | 13 +-
 src/backend/executor/execExprInterp.c | 2 +-
 src/backend/executor/execMain.c | 1 +
 src/backend/executor/execProcnode.c | 7 +-
 src/backend/executor/execTuples.c | 2 +-
 src/backend/executor/nodeAgg.c | 2 +-
 src/backend/executor/nodeGather.c | 2 +-
 src/backend/executor/nodeGatherMerge.c | 2 +-
 src/backend/executor/nodeHashjoin.c | 3 +-
 src/backend/executor/nodeMergejoin.c | 3 +-
 src/backend/executor/nodeModifyTable.c | 10 +-
 src/backend/executor/nodeSamplescan.c | 4 +-
 src/backend/executor/nodeSort.c | 4 +-
 src/backend/executor/nodeSubplan.c | 2 +-
 src/backend/executor/nodeValuesscan.c | 4 +-
 src/backend/jit/llvm/llvmjit_expr.c | 4 +-
 src/backend/lib/bloomfilter.c | 3 +-
 src/backend/libpq/be-secure-common.c | 2 +-
 src/backend/libpq/be-secure-openssl.c | 5 +-
 src/backend/nodes/bitmapset.c | 1 +
 src/backend/nodes/read.c | 6 +-
 src/backend/optimizer/path/allpaths.c | 17 +-
 src/backend/optimizer/path/indxpath.c | 3 +-
 src/backend/optimizer/path/joinrels.c | 14 +-
 src/backend/optimizer/plan/planner.c | 8 +-
 src/backend/optimizer/prep/prepunion.c | 6 +-
 src/backend/optimizer/util/plancat.c | 6 +-
 src/backend/parser/analyze.c | 2 +-
 src/backend/parser/parse_utilcmd.c | 30 +-
 src/backend/partitioning/partprune.c | 20 +-
 src/backend/port/win32_shmem.c | 8 +-
 src/backend/replication/basebackup.c | 39 +-
 .../libpqwalreceiver/libpqwalreceiver.c | 6 +-
 src/backend/replication/logical/logical.c | 2 +-
 src/backend/replication/logical/proto.c | 4 +-
 .../replication/logical/reorderbuffer.c | 56 +--
 src/backend/replication/logical/worker.c | 22 +-
 src/backend/replication/pgoutput/pgoutput.c | 4 +-
 src/backend/replication/slotfuncs.c | 4 +-
 src/backend/replication/walreceiver.c | 4 +-
 src/backend/replication/walsender.c | 8 +-
 src/backend/storage/file/buffile.c | 6 +-
 src/backend/storage/ipc/shm_mq.c | 7 +-
 src/backend/tcop/utility.c | 10 +-
 src/backend/tsearch/to_tsany.c | 2 +-
 src/backend/utils/adt/amutils.c | 14 +-
 src/backend/utils/adt/formatting.c | 2 +-
 src/backend/utils/adt/geo_spgist.c | 10 +-
 src/backend/utils/adt/jsonb.c | 17 +-
 src/backend/utils/adt/jsonfuncs.c | 30 +-
 src/backend/utils/adt/tsquery.c | 38 +-
 src/backend/utils/cache/relcache.c | 25 +-
 src/backend/utils/fmgr/fmgr.c | 9 +-
 src/backend/utils/misc/guc.c | 2 +
 src/backend/utils/mmgr/portalmem.c | 22 +-
 src/backend/utils/sort/sharedtuplestore.c | 4 +-
 src/bin/pg_basebackup/streamutil.c | 6 +-
 src/bin/pg_ctl/pg_ctl.c | 3 +-
 src/bin/pg_dump/common.c | 18 +-
 src/bin/pg_dump/pg_dump.c | 10 +-
 src/bin/pg_rewind/filemap.c | 14 +-
 src/bin/pg_upgrade/exec.c | 2 +
 src/bin/pg_upgrade/server.c | 4 +-
 src/bin/pgbench/pgbench.c | 414 ++++++++++--------
 src/bin/psql/common.c | 4 +-
 src/bin/psql/mainloop.c | 14 +-
 src/bin/psql/tab-complete.c | 11 +-
 src/fe_utils/conditional.c | 3 +-
 src/include/access/gin_private.h | 2 +-
 src/include/access/heapam_xlog.h | 2 +-
 src/include/access/nbtree.h | 12 +-
 src/include/access/reloptions.h | 2 +-
 src/include/access/relscan.h | 2 +-
 src/include/access/spgist_private.h | 4 +-
 src/include/access/twophase.h | 2 +-
 src/include/access/xact.h | 4 +-
 src/include/catalog/pg_class.h | 3 +-
 src/include/commands/tablecmds.h | 2 +-
 src/include/common/int.h | 24 +-
 src/include/common/scram-common.h | 2 +-
 src/include/common/string.h | 2 +-
 src/include/executor/execExpr.h | 4 +-
 src/include/executor/execPartition.h | 8 +-
 src/include/executor/executor.h | 2 +-
 src/include/executor/instrument.h | 4 +-
 src/include/executor/spi_priv.h | 6 +-
 src/include/executor/tuptable.h | 2 +-
 src/include/fe_utils/conditional.h | 2 +-
 src/include/libpq/libpq.h | 4 +-
 src/include/nodes/execnodes.h | 17 +-
 src/include/nodes/parsenodes.h | 6 +-
 src/include/nodes/relation.h | 6 +-
 src/include/optimizer/cost.h | 4 +-
 src/include/optimizer/paths.h | 8 +-
 src/include/parser/parse_func.h | 4 +-
 src/include/replication/logical.h | 6 +-
 src/include/replication/logicalproto.h | 4 +-
 src/include/replication/reorderbuffer.h | 6 +-
 src/include/replication/walreceiver.h | 8 +-
 src/include/storage/reinit.h | 2 +-
 src/include/tcop/utility.h | 3 +-
 src/include/tsearch/ts_utils.h | 6 +-
 src/include/utils/jsonapi.h | 17 +-
 src/include/utils/partcache.h | 2 +-
 src/include/utils/portal.h | 2 +-
 src/include/utils/rel.h | 4 +-
 src/include/utils/resowner_private.h | 4 +-
 src/include/utils/tuplesort.h | 2 +-
 src/interfaces/ecpg/ecpglib/data.c | 21 +-
 src/interfaces/ecpg/preproc/ecpg.c | 3 +-
 src/interfaces/ecpg/preproc/type.c | 6 +-
 src/interfaces/libpq/fe-connect.c | 2 +-
 src/interfaces/libpq/fe-secure-openssl.c | 17 +-
 src/interfaces/libpq/libpq-int.h | 6 +-
 src/pl/plpgsql/src/pl_comp.c | 8 +-
 src/pl/plpgsql/src/pl_exec.c | 12 +-
 src/pl/plpython/plpy_exec.c | 8 +-
 src/pl/tcl/pltcl.c | 4 +-
 .../test_bloomfilter/test_bloomfilter.c | 2 +-
 167 files changed, 1174 insertions(+), 1061 deletions(-)

diff --git
a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index a3a5287748..a1438a2855 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -227,12 +227,12 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed) * with heap relation locked first to prevent deadlocking). In hot * standby mode this will raise an error when parentcheck is true. * - * There is no need for the usual indcheckxmin usability horizon test here, - * even in the heapallindexed case, because index undergoing verification - * only needs to have entries for a new transaction snapshot. (If this is - * a parentcheck verification, there is no question about committed or - * recently dead heap tuples lacking index entries due to concurrent - * activity.) + * There is no need for the usual indcheckxmin usability horizon test + * here, even in the heapallindexed case, because index undergoing + * verification only needs to have entries for a new transaction snapshot. + * (If this is a parentcheck verification, there is no question about + * committed or recently dead heap tuples lacking index entries due to + * concurrent activity.) */ indrel = index_open(indrelid, lockmode); @@ -366,8 +366,8 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly, * index fingerprinting should have reached all tuples returned by * IndexBuildHeapScan(). * - * In readonly case, we also check for problems with missing downlinks. - * A second Bloom filter is used for this. + * In readonly case, we also check for problems with missing + * downlinks. A second Bloom filter is used for this. */ if (!state->readonly) { @@ -378,13 +378,13 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly, * READ COMMITTED mode. A new snapshot is guaranteed to have all * the entries it requires in the index. * - * We must defend against the possibility that an old xact snapshot - * was returned at higher isolation levels when that snapshot is - * not safe for index scans of the target index. This is possible - * when the snapshot sees tuples that are before the index's - * indcheckxmin horizon. Throwing an error here should be very - * rare. It doesn't seem worth using a secondary snapshot to avoid - * this. + * We must defend against the possibility that an old xact + * snapshot was returned at higher isolation levels when that + * snapshot is not safe for index scans of the target index. This + * is possible when the snapshot sees tuples that are before the + * index's indcheckxmin horizon. Throwing an error here should be + * very rare. It doesn't seem worth using a secondary snapshot to + * avoid this. */ if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin && !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data), @@ -396,13 +396,13 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly, } else { - int64 total_pages; + int64 total_pages; /* * Extra readonly downlink check. * - * In readonly case, we know that there cannot be a concurrent page - * split or a concurrent page deletion, which gives us the + * In readonly case, we know that there cannot be a concurrent + * page split or a concurrent page deletion, which gives us the * opportunity to verify that every non-ignorable page had a * downlink one level up. We must be tolerant of interrupted page * splits and page deletions, though. 
This is taken care of in @@ -491,9 +491,9 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly, } /* - * Create our own scan for IndexBuildHeapScan(), rather than getting it - * to do so for us. This is required so that we can actually use the - * MVCC snapshot registered earlier in !readonly case. + * Create our own scan for IndexBuildHeapScan(), rather than getting + * it to do so for us. This is required so that we can actually use + * the MVCC snapshot registered earlier in !readonly case. * * Note that IndexBuildHeapScan() calls heap_endscan() for us. */ @@ -607,10 +607,10 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) { /* * Since there cannot be a concurrent VACUUM operation in readonly - * mode, and since a page has no links within other pages (siblings - * and parent) once it is marked fully deleted, it should be - * impossible to land on a fully deleted page in readonly mode. - * See bt_downlink_check() for further details. + * mode, and since a page has no links within other pages + * (siblings and parent) once it is marked fully deleted, it + * should be impossible to land on a fully deleted page in + * readonly mode. See bt_downlink_check() for further details. * * The bt_downlink_check() P_ISDELETED() check is repeated here so * that pages that are only reachable through sibling links get @@ -799,8 +799,8 @@ bt_target_page_check(BtreeCheckState *state) P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock); /* - * Check the number of attributes in high key. Note, rightmost page doesn't - * contain a high key, so nothing to check + * Check the number of attributes in high key. Note, rightmost page + * doesn't contain a high key, so nothing to check */ if (!P_RIGHTMOST(topaque) && !_bt_check_natts(state->rel, state->target, P_HIKEY)) @@ -845,8 +845,8 @@ bt_target_page_check(BtreeCheckState *state) /* * lp_len should match the IndexTuple reported length exactly, since - * lp_len is completely redundant in indexes, and both sources of tuple - * length are MAXALIGN()'d. nbtree does not use lp_len all that + * lp_len is completely redundant in indexes, and both sources of + * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that * frequently, and is surprisingly tolerant of corrupt lp_len fields. */ if (tupsize != ItemIdGetLength(itemid)) @@ -1441,13 +1441,13 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock, static void bt_downlink_missing_check(BtreeCheckState *state) { - BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target); - ItemId itemid; - IndexTuple itup; - Page child; - BTPageOpaque copaque; - uint32 level; - BlockNumber childblk; + BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target); + ItemId itemid; + IndexTuple itup; + Page child; + BTPageOpaque copaque; + uint32 level; + BlockNumber childblk; Assert(state->heapallindexed && state->readonly); Assert(!P_IGNORE(topaque)); @@ -1462,14 +1462,15 @@ bt_downlink_missing_check(BtreeCheckState *state) * page split in passing, when it notices that the left sibling page is * P_INCOMPLETE_SPLIT(). * - * In general, VACUUM is not prepared for there to be no downlink to a page - * that it deletes. This is the main reason why the lack of a downlink can - * be reported as corruption here. It's not obvious that an invalid - * missing downlink can result in wrong answers to queries, though, since - * index scans that land on the child may end up consistently moving right. 
- * The handling of concurrent page splits (and page deletions) within - * _bt_moveright() cannot distinguish inconsistencies that last for a - * moment from inconsistencies that are permanent and irrecoverable. + * In general, VACUUM is not prepared for there to be no downlink to a + * page that it deletes. This is the main reason why the lack of a + * downlink can be reported as corruption here. It's not obvious that an + * invalid missing downlink can result in wrong answers to queries, + * though, since index scans that land on the child may end up + * consistently moving right. The handling of concurrent page splits (and + * page deletions) within _bt_moveright() cannot distinguish + * inconsistencies that last for a moment from inconsistencies that are + * permanent and irrecoverable. * * VACUUM isn't even prepared to delete pages that have no downlink due to * an incomplete page split, but it can detect and reason about that case @@ -1498,8 +1499,8 @@ bt_downlink_missing_check(BtreeCheckState *state) /* * Target is probably the "top parent" of a multi-level page deletion. - * We'll need to descend the subtree to make sure that descendant pages are - * consistent with that, though. + * We'll need to descend the subtree to make sure that descendant pages + * are consistent with that, though. * * If the target page (which must be non-ignorable) is a leaf page, then * clearly it can't be the top parent. The lack of a downlink is probably @@ -1562,14 +1563,14 @@ bt_downlink_missing_check(BtreeCheckState *state) * bt_downlink_check() does not visit pages reachable through negative * infinity items. Besides, bt_downlink_check() is unwilling to descend * multiple levels. (The similar bt_downlink_check() P_ISDELETED() check - * within bt_check_level_from_leftmost() won't reach the page either, since - * the leaf's live siblings should have their sibling links updated to - * bypass the deletion target page when it is marked fully dead.) + * within bt_check_level_from_leftmost() won't reach the page either, + * since the leaf's live siblings should have their sibling links updated + * to bypass the deletion target page when it is marked fully dead.) * * If this error is raised, it might be due to a previous multi-level page - * deletion that failed to realize that it wasn't yet safe to mark the leaf - * page as fully dead. A "dangling downlink" will still remain when this - * happens. The fact that the dangling downlink's page (the leaf's + * deletion that failed to realize that it wasn't yet safe to mark the + * leaf page as fully dead. A "dangling downlink" will still remain when + * this happens. The fact that the dangling downlink's page (the leaf's * parent/ancestor page) lacked a downlink is incidental. */ if (P_ISDELETED(copaque)) @@ -1583,14 +1584,14 @@ bt_downlink_missing_check(BtreeCheckState *state) (uint32) state->targetlsn))); /* - * Iff leaf page is half-dead, its high key top parent link should point to - * what VACUUM considered to be the top parent page at the instant it was - * interrupted. Provided the high key link actually points to the target - * page, the missing downlink we detected is consistent with there having - * been an interrupted multi-level page deletion. This means that the - * subtree with the target page at its root (a page deletion chain) is in a - * consistent state, enabling VACUUM to resume deleting the entire chain - * the next time it encounters the half-dead leaf page. 
+ * Iff leaf page is half-dead, its high key top parent link should point + * to what VACUUM considered to be the top parent page at the instant it + * was interrupted. Provided the high key link actually points to the + * target page, the missing downlink we detected is consistent with there + * having been an interrupted multi-level page deletion. This means that + * the subtree with the target page at its root (a page deletion chain) is + * in a consistent state, enabling VACUUM to resume deleting the entire + * chain the next time it encounters the half-dead leaf page. */ if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque)) { @@ -1681,16 +1682,17 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values, * are assumed immutable. While the LP_DEAD bit is mutable in leaf pages, * that's ItemId metadata, which was not fingerprinted. (There will often * be some dead-to-everyone IndexTuples fingerprinted by the Bloom filter, - * but we only try to detect the absence of needed tuples, so that's okay.) + * but we only try to detect the absence of needed tuples, so that's + * okay.) * - * Note that we rely on deterministic index_form_tuple() TOAST compression. - * If index_form_tuple() was ever enhanced to compress datums out-of-line, - * or otherwise varied when or how compression was applied, our assumption - * would break, leading to false positive reports of corruption. It's also - * possible that non-pivot tuples could in the future have alternative - * equivalent representations (e.g. by using the INDEX_ALT_TID_MASK bit). - * For now, we don't decompress/normalize toasted values as part of - * fingerprinting. + * Note that we rely on deterministic index_form_tuple() TOAST + * compression. If index_form_tuple() was ever enhanced to compress datums + * out-of-line, or otherwise varied when or how compression was applied, + * our assumption would break, leading to false positive reports of + * corruption. It's also possible that non-pivot tuples could in the + * future have alternative equivalent representations (e.g. by using the + * INDEX_ALT_TID_MASK bit). For now, we don't decompress/normalize toasted + * values as part of fingerprinting. */ itup = index_form_tuple(RelationGetDescr(index), values, isnull); itup->t_tid = htup->t_self; @@ -1905,19 +1907,19 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) * Sanity checks for number of items on page. * * As noted at the beginning of _bt_binsrch(), an internal page must have - * children, since there must always be a negative infinity downlink (there - * may also be a highkey). In the case of non-rightmost leaf pages, there - * must be at least a highkey. + * children, since there must always be a negative infinity downlink + * (there may also be a highkey). In the case of non-rightmost leaf + * pages, there must be at least a highkey. * - * This is correct when pages are half-dead, since internal pages are never - * half-dead, and leaf pages must have a high key when half-dead (the - * rightmost page can never be deleted). It's also correct with fully - * deleted pages: _bt_unlink_halfdead_page() doesn't change anything about - * the target page other than setting the page as fully dead, and setting - * its xact field. In particular, it doesn't change the sibling links in - * the deletion target itself, since they're required when index scans land - * on the deletion target, and then need to move right (or need to move - * left, in the case of backward index scans). 
+ * This is correct when pages are half-dead, since internal pages are + * never half-dead, and leaf pages must have a high key when half-dead + * (the rightmost page can never be deleted). It's also correct with + * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything + * about the target page other than setting the page as fully dead, and + * setting its xact field. In particular, it doesn't change the sibling + * links in the deletion target itself, since they're required when index + * scans land on the deletion target, and then need to move right (or need + * to move left, in the case of backward index scans). */ maxoffset = PageGetMaxOffsetNumber(page); if (maxoffset > MaxIndexTuplesPerPage) diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c index a660681e58..d262c18e89 100644 --- a/contrib/btree_gin/btree_gin.c +++ b/contrib/btree_gin/btree_gin.c @@ -483,8 +483,12 @@ GIN_SUPPORT(anyenum, false, leftmostvalue_enum, gin_enum_cmp) static Datum leftmostvalue_uuid(void) { - /* palloc0 will create the UUID with all zeroes: "00000000-0000-0000-0000-000000000000" */ - pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t)); + /* + * palloc0 will create the UUID with all zeroes: + * "00000000-0000-0000-0000-000000000000" + */ + pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t)); + return UUIDPGetDatum(retval); } @@ -493,7 +497,8 @@ GIN_SUPPORT(uuid, false, leftmostvalue_uuid, uuid_cmp) static Datum leftmostvalue_name(void) { - NameData* result = (NameData *) palloc0(NAMEDATALEN); + NameData *result = (NameData *) palloc0(NAMEDATALEN); + return NameGetDatum(result); } diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c index d96ca1ec1f..092ef149cf 100644 --- a/contrib/cube/cube.c +++ b/contrib/cube/cube.c @@ -1361,9 +1361,10 @@ g_cube_distance(PG_FUNCTION_ARGS) if (coord <= 2 * DIM(cube)) { /* dimension index */ - int index = (coord - 1) / 2; + int index = (coord - 1) / 2; + /* whether this is upper bound (lower bound otherwise) */ - bool upper = ((coord - 1) % 2 == 1); + bool upper = ((coord - 1) % 2 == 1); if (IS_POINT(cube)) { @@ -1596,9 +1597,10 @@ cube_coord_llur(PG_FUNCTION_ARGS) if (coord <= 2 * DIM(cube)) { /* dimension index */ - int index = (coord - 1) / 2; + int index = (coord - 1) / 2; + /* whether this is upper bound (lower bound otherwise) */ - bool upper = ((coord - 1) % 2 == 1); + bool upper = ((coord - 1) % 2 == 1); if (IS_POINT(cube)) { @@ -1615,8 +1617,8 @@ cube_coord_llur(PG_FUNCTION_ARGS) else { /* - * Return zero if coordinate is out of bound. That reproduces logic of - * how cubes with low dimension number are expanded during GiST + * Return zero if coordinate is out of bound. That reproduces logic + * of how cubes with low dimension number are expanded during GiST * indexing. 
*/ result = 0.0; diff --git a/contrib/jsonb_plperl/jsonb_plperl.c b/contrib/jsonb_plperl/jsonb_plperl.c index 837bae2ab5..cde38b295c 100644 --- a/contrib/jsonb_plperl/jsonb_plperl.c +++ b/contrib/jsonb_plperl/jsonb_plperl.c @@ -18,7 +18,7 @@ static SV *Jsonb_to_SV(JsonbContainer *jsonb); static JsonbValue *SV_to_JsonbValue(SV *obj, JsonbParseState **ps, bool is_elem); -static SV * +static SV * JsonbValue_to_SV(JsonbValue *jbv) { dTHX; @@ -33,6 +33,7 @@ JsonbValue_to_SV(JsonbValue *jbv) char *str = DatumGetCString(DirectFunctionCall1(numeric_out, NumericGetDatum(jbv->val.numeric))); SV *result = newSVnv(SvNV(cstr2sv(str))); + pfree(str); return result; } @@ -42,6 +43,7 @@ JsonbValue_to_SV(JsonbValue *jbv) char *str = pnstrdup(jbv->val.string.val, jbv->val.string.len); SV *result = cstr2sv(str); + pfree(str); return result; } diff --git a/contrib/jsonb_plpython/jsonb_plpython.c b/contrib/jsonb_plpython/jsonb_plpython.c index 548826f592..08a7598aae 100644 --- a/contrib/jsonb_plpython/jsonb_plpython.c +++ b/contrib/jsonb_plpython/jsonb_plpython.c @@ -25,7 +25,7 @@ static PyObject *decimal_constructor; static PyObject *PLyObject_FromJsonbContainer(JsonbContainer *jsonb); static JsonbValue *PLyObject_ToJsonbValue(PyObject *obj, - JsonbParseState **jsonb_state, bool is_elem); + JsonbParseState **jsonb_state, bool is_elem); #if PY_MAJOR_VERSION >= 3 typedef PyObject *(*PLyUnicode_FromStringAndSize_t) @@ -373,10 +373,11 @@ PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_ele out->type = jbvNull; else if (PyString_Check(obj) || PyUnicode_Check(obj)) PLyString_ToJsonbValue(obj, out); - /* - * PyNumber_Check() returns true for booleans, so boolean check should come - * first. - */ + + /* + * PyNumber_Check() returns true for booleans, so boolean check should + * come first. + */ else if (PyBool_Check(obj)) { out = palloc(sizeof(JsonbValue)); diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index 53e6830ab1..f1e05478da 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -292,7 +292,11 @@ gtrgm_consistent(PG_FUNCTION_ARGS) case SimilarityStrategyNumber: case WordSimilarityStrategyNumber: case StrictWordSimilarityStrategyNumber: - /* Similarity search is exact. (Strict) word similarity search is inexact */ + + /* + * Similarity search is exact. 
(Strict) word similarity search is + * inexact + */ *recheck = (strategy != SimilarityStrategyNumber); nlimit = index_strategy_get_limit(strategy); diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index 67cca9703f..9f26725ec2 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -48,14 +48,14 @@ typedef struct /* Trigram bound type */ typedef uint8 TrgmBound; -#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */ -#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */ +#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */ +#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */ /* Word similarity flags */ -#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar - * search pattern in text */ -#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match - * word bounds */ +#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar + * search pattern in text */ +#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match + * word bounds */ /* * Module load callback @@ -144,7 +144,7 @@ index_strategy_get_limit(StrategyNumber strategy) break; } - return 0.0; /* keep compiler quiet */ + return 0.0; /* keep compiler quiet */ } /* @@ -496,13 +496,13 @@ iterate_word_similarity(int *trg2indexes, /* Select appropriate threshold */ threshold = (flags & WORD_SIMILARITY_STRICT) ? - strict_word_similarity_threshold : - word_similarity_threshold; + strict_word_similarity_threshold : + word_similarity_threshold; /* - * Consider first trigram as initial lower bount for strict word similarity, - * or initialize it later with first trigram present for plain word - * similarity. + * Consider first trigram as initial lower bount for strict word + * similarity, or initialize it later with first trigram present for plain + * word similarity. */ lower = (flags & WORD_SIMILARITY_STRICT) ? 0 : -1; @@ -533,7 +533,7 @@ iterate_word_similarity(int *trg2indexes, * plain word similarity */ if ((flags & WORD_SIMILARITY_STRICT) ? (bounds[i] & TRGM_BOUND_RIGHT) - : found[trgindex]) + : found[trgindex]) { int prev_lower, tmp_ulen2, @@ -597,8 +597,8 @@ iterate_word_similarity(int *trg2indexes, smlr_max = Max(smlr_max, smlr_cur); /* - * if we only check that word similarity is greater than - * threshold we do not need to calculate a maximum similarity. + * if we only check that word similarity is greater than threshold + * we do not need to calculate a maximum similarity. */ if ((flags & WORD_SIMILARITY_CHECK_ONLY) && smlr_max >= threshold) break; @@ -653,7 +653,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2, ulen1; int *trg2indexes; float4 result; - TrgmBound *bounds; + TrgmBound *bounds; protect_out_of_mem(slen1 + slen2); diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index a46160df7c..312581b741 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -4918,8 +4918,8 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel, &rows, &width, &startup_cost, &total_cost); /* - * The EPQ path must be at least as well sorted as the path itself, - * in case it gets used as input to a mergejoin. + * The EPQ path must be at least as well sorted as the path itself, in + * case it gets used as input to a mergejoin. 
*/ sorted_epq_path = epq_path; if (sorted_epq_path != NULL && diff --git a/contrib/tcn/tcn.c b/contrib/tcn/tcn.c index 43bdd92749..0c274322bd 100644 --- a/contrib/tcn/tcn.c +++ b/contrib/tcn/tcn.c @@ -138,7 +138,7 @@ triggered_change_notification(PG_FUNCTION_ARGS) /* we're only interested if it is the primary key and valid */ if (index->indisprimary && IndexIsValid(index)) { - int indnkeyatts = index->indnkeyatts; + int indnkeyatts = index->indnkeyatts; if (indnkeyatts > 0) { diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index e192d5b4ad..1c439b57b0 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -53,9 +53,9 @@ static void pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); static void pg_decode_truncate(LogicalDecodingContext *ctx, - ReorderBufferTXN *txn, - int nrelations, Relation relations[], - ReorderBufferChange *change); + ReorderBufferTXN *txn, + int nrelations, Relation relations[], + ReorderBufferChange *change); static bool pg_decode_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); static void pg_decode_message(LogicalDecodingContext *ctx, diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index e716f51503..60e650dfee 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -189,7 +189,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls, NULL, BUFFER_LOCK_SHARE, NULL); if (!lastPageTuple) { - bool recorded; + bool recorded; recorded = AutoVacuumRequestWork(AVW_BRINSummarizeRange, RelationGetRelid(idxRel), diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index b9802b92c0..104172184f 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -1685,8 +1685,8 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum) attno = slot->tts_nvalid; /* - * If tuple doesn't have all the atts indicated by attnum, read the - * rest as NULLs or missing values + * If tuple doesn't have all the atts indicated by attnum, read the rest + * as NULLs or missing values */ if (attno < attnum) slot_getmissingattrs(slot, attno, attnum); diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index ca690e522f..aa52a96259 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -489,8 +489,8 @@ index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source, Assert(IndexTupleSize(truncated) <= IndexTupleSize(source)); /* - * Cannot leak memory here, TupleDescCopy() doesn't allocate any - * inner structure, so, plain pfree() should clean all allocated memory + * Cannot leak memory here, TupleDescCopy() doesn't allocate any inner + * structure, so, plain pfree() should clean all allocated memory */ pfree(truncdesc); diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 69ab2f101c..e0c9c3431c 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -1495,9 +1495,9 @@ index_reloptions(amoptions_function amoptions, Datum reloptions, bool validate) bytea * index_generic_reloptions(Datum reloptions, bool validate) { - int numoptions; + int numoptions; GenericIndexOpts *idxopts; - relopt_value *options; + relopt_value *options; static const relopt_parse_elt tab[] = { {"recheck_on_update", RELOPT_TYPE_BOOL, offsetof(GenericIndexOpts, 
recheck_on_update)} }; @@ -1512,12 +1512,12 @@ index_generic_reloptions(Datum reloptions, bool validate) idxopts = allocateReloptStruct(sizeof(GenericIndexOpts), options, numoptions); - fillRelOptions((void *)idxopts, sizeof(GenericIndexOpts), options, numoptions, + fillRelOptions((void *) idxopts, sizeof(GenericIndexOpts), options, numoptions, validate, tab, lengthof(tab)); pfree(options); - return (bytea*) idxopts; + return (bytea *) idxopts; } /* diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index 095b1192cb..828c7074b7 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -521,12 +521,12 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, { PredicateLockPageSplit(btree->index, - BufferGetBlockNumber(stack->buffer), - BufferGetBlockNumber(lbuffer)); + BufferGetBlockNumber(stack->buffer), + BufferGetBlockNumber(lbuffer)); PredicateLockPageSplit(btree->index, - BufferGetBlockNumber(stack->buffer), - BufferGetBlockNumber(rbuffer)); + BufferGetBlockNumber(stack->buffer), + BufferGetBlockNumber(rbuffer)); } } @@ -543,8 +543,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, { PredicateLockPageSplit(btree->index, - BufferGetBlockNumber(stack->buffer), - BufferGetBlockNumber(rbuffer)); + BufferGetBlockNumber(stack->buffer), + BufferGetBlockNumber(rbuffer)); } } diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 642ca1a2c7..59bf21744f 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -1812,8 +1812,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, blkno = BufferGetBlockNumber(buffer); /* - * Copy a predicate lock from entry tree leaf (containing posting list) - * to posting tree. + * Copy a predicate lock from entry tree leaf (containing posting list) to + * posting tree. */ PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno); diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index 0e984166fa..f3db7cc640 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -42,11 +42,11 @@ static void GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot) { /* - * When fast update is on then no need in locking pages, because we - * anyway need to lock the whole index. + * When fast update is on then no need in locking pages, because we anyway + * need to lock the whole index. */ if (!GinGetUseFastUpdate(index)) - PredicateLockPage(index, blkno, snapshot); + PredicateLockPage(index, blkno, snapshot); } /* @@ -426,8 +426,8 @@ restartScanEntry: entry->buffer = stack->buffer; /* - * Predicate lock visited posting tree page, following pages - * will be locked by moveRightIfItNeeded or entryLoadMoreItems + * Predicate lock visited posting tree page, following pages will + * be locked by moveRightIfItNeeded or entryLoadMoreItems */ GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot); @@ -1779,9 +1779,9 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids) UnlockReleaseBuffer(metabuffer); /* - * If fast update is enabled, we acquire a predicate lock on the entire - * relation as fast update postpones the insertion of tuples into index - * structure due to which we can't detect rw conflicts. 
+ * If fast update is enabled, we acquire a predicate lock on the + * entire relation as fast update postpones the insertion of tuples + * into index structure due to which we can't detect rw conflicts. */ if (GinGetUseFastUpdate(scan->indexRelation)) PredicateLockRelation(scan->indexRelation, scan->xs_snapshot); diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index ec5eebb848..cf218dd75d 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -519,12 +519,12 @@ gininsert(Relation index, Datum *values, bool *isnull, /* * With fastupdate on each scan and each insert begin with access to - * pending list, so it effectively lock entire index. In this case - * we aquire predicate lock and check for conflicts over index relation, + * pending list, so it effectively lock entire index. In this case we + * aquire predicate lock and check for conflicts over index relation, * and hope that it will reduce locking overhead. * - * Do not use GinCheckForSerializableConflictIn() here, because - * it will do nothing (it does actual work only with fastupdate off). + * Do not use GinCheckForSerializableConflictIn() here, because it + * will do nothing (it does actual work only with fastupdate off). * Check for conflicts for entire index. */ CheckForSerializableConflictIn(index, NULL, InvalidBuffer); @@ -539,7 +539,7 @@ gininsert(Relation index, Datum *values, bool *isnull, } else { - GinStatsData stats; + GinStatsData stats; /* * Fastupdate is off but if pending list isn't empty then we need to diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 9007d65ad2..f7a9168925 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -341,8 +341,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, ptr->page = BufferGetPage(ptr->buffer); ptr->block.blkno = BufferGetBlockNumber(ptr->buffer); PredicateLockPageSplit(rel, - BufferGetBlockNumber(buffer), - BufferGetBlockNumber(ptr->buffer)); + BufferGetBlockNumber(buffer), + BufferGetBlockNumber(ptr->buffer)); } /* @@ -1220,8 +1220,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, bool is_split; /* - * Check for any rw conflicts (in serialisation isolation level) - * just before we intend to modify the page + * Check for any rw conflicts (in serialisation isolation level) just + * before we intend to modify the page */ CheckForSerializableConflictIn(state->r, NULL, stack->buffer); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 4fdb549099..1a672150be 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -3460,7 +3460,7 @@ simple_heap_delete(Relation relation, ItemPointer tid) result = heap_delete(relation, tid, GetCurrentCommandId(true), InvalidSnapshot, true /* wait for commit */ , - &hufd, false /* changingPart */); + &hufd, false /* changingPart */ ); switch (result) { case HeapTupleSelfUpdated: @@ -4483,29 +4483,31 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, * functional index. Compare the new and old values of the indexed * expression to see if we are able to use a HOT update or not. 
*/ -static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup) +static bool +ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup) { - ListCell *l; - List *indexoidlist = RelationGetIndexList(relation); - EState *estate = CreateExecutorState(); - ExprContext *econtext = GetPerTupleExprContext(estate); + ListCell *l; + List *indexoidlist = RelationGetIndexList(relation); + EState *estate = CreateExecutorState(); + ExprContext *econtext = GetPerTupleExprContext(estate); TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation)); - bool equals = true; - Datum old_values[INDEX_MAX_KEYS]; - bool old_isnull[INDEX_MAX_KEYS]; - Datum new_values[INDEX_MAX_KEYS]; - bool new_isnull[INDEX_MAX_KEYS]; - int indexno = 0; + bool equals = true; + Datum old_values[INDEX_MAX_KEYS]; + bool old_isnull[INDEX_MAX_KEYS]; + Datum new_values[INDEX_MAX_KEYS]; + bool new_isnull[INDEX_MAX_KEYS]; + int indexno = 0; + econtext->ecxt_scantuple = slot; foreach(l, indexoidlist) { if (bms_is_member(indexno, relation->rd_projidx)) { - Oid indexOid = lfirst_oid(l); - Relation indexDesc = index_open(indexOid, AccessShareLock); + Oid indexOid = lfirst_oid(l); + Relation indexDesc = index_open(indexOid, AccessShareLock); IndexInfo *indexInfo = BuildIndexInfo(indexDesc); - int i; + int i; ResetExprContext(econtext); ExecStoreTuple(oldtup, slot, InvalidBuffer, false); @@ -4532,6 +4534,7 @@ static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple else if (!old_isnull[i]) { Form_pg_attribute att = TupleDescAttr(RelationGetDescr(indexDesc), i); + if (!datumIsEqual(old_values[i], new_values[i], att->attbyval, att->attlen)) { equals = false; @@ -6533,8 +6536,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, /* * This old multi cannot possibly have members still running, but * verify just in case. If it was a locker only, it can be removed - * without any further consideration; but if it contained an update, we - * might need to preserve it. + * without any further consideration; but if it contained an update, + * we might need to preserve it. */ if (MultiXactIdIsRunning(multi, HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))) @@ -6681,8 +6684,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, else { /* - * Not in progress, not committed -- must be aborted or crashed; - * we can ignore it. + * Not in progress, not committed -- must be aborted or + * crashed; we can ignore it. */ } @@ -9275,6 +9278,7 @@ heap_redo(XLogReaderState *record) heap_xlog_update(record, false); break; case XLOG_HEAP_TRUNCATE: + /* * TRUNCATE is a no-op because the actions are already logged as * SMGR WAL records. TRUNCATE WAL record only exists for logical diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index ecf4e53502..3d5936f186 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -132,31 +132,31 @@ _bt_doinsert(Relation rel, IndexTuple itup, * rightmost leaf, has enough free space to accommodate a new entry and * the insertion key is strictly greater than the first key in this page, * then we can safely conclude that the new key will be inserted in the - * cached block. So we simply search within the cached block and insert the - * key at the appropriate location. We call it a fastpath. + * cached block. So we simply search within the cached block and insert + * the key at the appropriate location. We call it a fastpath. 
* * Testing has revealed, though, that the fastpath can result in increased * contention on the exclusive-lock on the rightmost leaf page. So we - * conditionally check if the lock is available. If it's not available then - * we simply abandon the fastpath and take the regular path. This makes - * sense because unavailability of the lock also signals that some other - * backend might be concurrently inserting into the page, thus reducing our - * chances to finding an insertion place in this page. + * conditionally check if the lock is available. If it's not available + * then we simply abandon the fastpath and take the regular path. This + * makes sense because unavailability of the lock also signals that some + * other backend might be concurrently inserting into the page, thus + * reducing our chances to finding an insertion place in this page. */ top: fastpath = false; offset = InvalidOffsetNumber; if (RelationGetTargetBlock(rel) != InvalidBlockNumber) { - Size itemsz; - Page page; - BTPageOpaque lpageop; + Size itemsz; + Page page; + BTPageOpaque lpageop; /* * Conditionally acquire exclusive lock on the buffer before doing any * checks. If we don't get the lock, we simply follow slowpath. If we - * do get the lock, this ensures that the index state cannot change, as - * far as the rightmost part of the index is concerned. + * do get the lock, this ensures that the index state cannot change, + * as far as the rightmost part of the index is concerned. */ buf = ReadBuffer(rel, RelationGetTargetBlock(rel)); @@ -173,8 +173,8 @@ top: /* * Check if the page is still the rightmost leaf page, has enough - * free space to accommodate the new tuple, and the insertion - * scan key is strictly greater than the first key on the page. + * free space to accommodate the new tuple, and the insertion scan + * key is strictly greater than the first key on the page. */ if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) && !P_IGNORE(lpageop) && @@ -207,8 +207,8 @@ top: ReleaseBuffer(buf); /* - * If someone's holding a lock, it's likely to change anyway, - * so don't try again until we get an updated rightmost leaf. + * If someone's holding a lock, it's likely to change anyway, so + * don't try again until we get an updated rightmost leaf. */ RelationSetTargetBlock(rel, InvalidBlockNumber); } @@ -882,22 +882,22 @@ _bt_insertonpg(Relation rel, Buffer rbuf; /* - * If we're here then a pagesplit is needed. We should never reach here - * if we're using the fastpath since we should have checked for all the - * required conditions, including the fact that this page has enough - * freespace. Note that this routine can in theory deal with the - * situation where a NULL stack pointer is passed (that's what would - * happen if the fastpath is taken), like it does during crash + * If we're here then a pagesplit is needed. We should never reach + * here if we're using the fastpath since we should have checked for + * all the required conditions, including the fact that this page has + * enough freespace. Note that this routine can in theory deal with + * the situation where a NULL stack pointer is passed (that's what + * would happen if the fastpath is taken), like it does during crash * recovery. But that path is much slower, defeating the very purpose - * of the optimization. The following assertion should protect us from - * any future code changes that invalidate those assumptions. + * of the optimization. The following assertion should protect us + * from any future code changes that invalidate those assumptions. 
* * Note that whenever we fail to take the fastpath, we clear the * cached block. Checking for a valid cached block at this point is * enough to decide whether we're in a fastpath or not. */ Assert(!(P_ISLEAF(lpageop) && - BlockNumberIsValid(RelationGetTargetBlock(rel)))); + BlockNumberIsValid(RelationGetTargetBlock(rel)))); /* Choose the split point */ firstright = _bt_findsplitloc(rel, page, @@ -936,7 +936,7 @@ _bt_insertonpg(Relation rel, BTMetaPageData *metad = NULL; OffsetNumber itup_off; BlockNumber itup_blkno; - BlockNumber cachedBlock = InvalidBlockNumber; + BlockNumber cachedBlock = InvalidBlockNumber; itup_off = newitemoff; itup_blkno = BufferGetBlockNumber(buf); @@ -1093,7 +1093,8 @@ _bt_insertonpg(Relation rel, * We do this after dropping locks on all buffers. So the information * about whether the insertion block is still the rightmost block or * not may have changed in between. But we will deal with that during - * next insert operation. No special care is required while setting it. + * next insert operation. No special care is required while setting + * it. */ if (BlockNumberIsValid(cachedBlock) && _bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL) diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 3be229db1f..3bcc56e9d2 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -155,11 +155,11 @@ void _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, float8 numHeapTuples) { - Buffer metabuf; - Page metapg; + Buffer metabuf; + Page metapg; BTMetaPageData *metad; - bool needsRewrite = false; - XLogRecPtr recptr; + bool needsRewrite = false; + XLogRecPtr recptr; /* read the metapage and check if it needs rewrite */ metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index d97f5249de..e5dce00876 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -785,10 +785,10 @@ _bt_parallel_advance_array_keys(IndexScanDesc scan) static bool _bt_vacuum_needs_cleanup(IndexVacuumInfo *info) { - Buffer metabuf; - Page metapg; + Buffer metabuf; + Page metapg; BTMetaPageData *metad; - bool result = false; + bool result = false; metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ); metapg = BufferGetPage(metabuf); @@ -814,8 +814,8 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info) } else { - StdRdOptions *relopts; - float8 cleanup_scale_factor; + StdRdOptions *relopts; + float8 cleanup_scale_factor; /* * If table receives large enough amount of insertions and no cleanup @@ -825,14 +825,14 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info) */ relopts = (StdRdOptions *) info->index->rd_options; cleanup_scale_factor = (relopts && - relopts->vacuum_cleanup_index_scale_factor >= 0) - ? relopts->vacuum_cleanup_index_scale_factor - : vacuum_cleanup_index_scale_factor; + relopts->vacuum_cleanup_index_scale_factor >= 0) + ? 
relopts->vacuum_cleanup_index_scale_factor + : vacuum_cleanup_index_scale_factor; if (cleanup_scale_factor < 0 || metad->btm_last_cleanup_num_heap_tuples < 0 || info->num_heap_tuples > (1.0 + cleanup_scale_factor) * - metad->btm_last_cleanup_num_heap_tuples) + metad->btm_last_cleanup_num_heap_tuples) result = true; } @@ -862,7 +862,7 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* The ENSURE stuff ensures we clean up shared memory on failure */ PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel)); { - TransactionId oldestBtpoXact; + TransactionId oldestBtpoXact; cycleid = _bt_start_vacuum(rel); @@ -907,7 +907,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) */ if (stats == NULL) { - TransactionId oldestBtpoXact; + TransactionId oldestBtpoXact; /* Check if we need a cleanup */ if (!_bt_vacuum_needs_cleanup(info)) diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 7deda9acac..0587e42573 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -897,10 +897,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) /* * Truncate any non-key attributes from high key on leaf level * (i.e. truncate on leaf level if we're building an INCLUDE - * index). This is only done at the leaf level because - * downlinks in internal pages are either negative infinity - * items, or get their contents from copying from one level - * down. See also: _bt_split(). + * index). This is only done at the leaf level because downlinks + * in internal pages are either negative infinity items, or get + * their contents from copying from one level down. See also: + * _bt_split(). * * Since the truncated tuple is probably smaller than the * original, it cannot just be copied in place (besides, we want @@ -908,11 +908,11 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) * original high key, and add our own truncated high key at the * same offset. * - * Note that the page layout won't be changed very much. oitup - * is already located at the physical beginning of tuple space, - * so we only shift the line pointer array back and forth, and - * overwrite the latter portion of the space occupied by the - * original tuple. This is fairly cheap. + * Note that the page layout won't be changed very much. oitup is + * already located at the physical beginning of tuple space, so we + * only shift the line pointer array back and forth, and overwrite + * the latter portion of the space occupied by the original tuple. + * This is fairly cheap. */ truncated = _bt_nonkey_truncate(wstate->index, oitup); truncsz = IndexTupleSize(truncated); @@ -978,7 +978,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) */ if (last_off == P_HIKEY) { - BTPageOpaque npageop; + BTPageOpaque npageop; Assert(state->btps_minkey == NULL); diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 0cecbf8e38..acb944357a 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -2101,12 +2101,12 @@ btproperty(Oid index_oid, int attno, IndexTuple _bt_nonkey_truncate(Relation rel, IndexTuple itup) { - int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel); - IndexTuple truncated; + int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel); + IndexTuple truncated; /* - * We should only ever truncate leaf index tuples, which must have both key - * and non-key attributes. 
It's never okay to truncate a second time. + * We should only ever truncate leaf index tuples, which must have both + * key and non-key attributes. It's never okay to truncate a second time. */ Assert(BTreeTupleGetNAtts(itup, rel) == IndexRelationGetNumberOfAttributes(rel)); @@ -2133,10 +2133,10 @@ _bt_nonkey_truncate(Relation rel, IndexTuple itup) bool _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) { - int16 natts = IndexRelationGetNumberOfAttributes(rel); - int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); - BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); - IndexTuple itup; + int16 natts = IndexRelationGetNumberOfAttributes(rel); + int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); + BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); + IndexTuple itup; /* * We cannot reliably test a deleted or half-deleted page, since they have @@ -2147,6 +2147,7 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) Assert(offnum >= FirstOffsetNumber && offnum <= PageGetMaxOffsetNumber(page)); + /* * Mask allocated for number of keys in index tuple must be able to fit * maximum possible number of index attributes @@ -2178,29 +2179,29 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) return BTreeTupleGetNAtts(itup, rel) == nkeyatts; } } - else /* !P_ISLEAF(opaque) */ + else /* !P_ISLEAF(opaque) */ { if (offnum == P_FIRSTDATAKEY(opaque)) { /* * The first tuple on any internal page (possibly the first after - * its high key) is its negative infinity tuple. Negative infinity - * tuples are always truncated to zero attributes. They are a - * particular kind of pivot tuple. + * its high key) is its negative infinity tuple. Negative + * infinity tuples are always truncated to zero attributes. They + * are a particular kind of pivot tuple. * * The number of attributes won't be explicitly represented if the * negative infinity tuple was generated during a page split that - * occurred with a version of Postgres before v11. There must be a - * problem when there is an explicit representation that is + * occurred with a version of Postgres before v11. There must be + * a problem when there is an explicit representation that is * non-zero, or when there is no explicit representation and the * tuple is evidently not a pre-pg_upgrade tuple. * - * Prior to v11, downlinks always had P_HIKEY as their offset. Use - * that to decide if the tuple is a pre-v11 tuple. + * Prior to v11, downlinks always had P_HIKEY as their offset. + * Use that to decide if the tuple is a pre-v11 tuple. */ return BTreeTupleGetNAtts(itup, rel) == 0 || - ((itup->t_info & INDEX_ALT_TID_MASK) == 0 && - ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY); + ((itup->t_info & INDEX_ALT_TID_MASK) == 0 && + ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY); } else { diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 7bf26f8bae..098e09c574 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -1908,11 +1908,12 @@ spgdoinsert(Relation index, SpGistState *state, /* * Prepare the leaf datum to insert. * - * If an optional "compress" method is provided, then call it to form - * the leaf datum from the input datum. Otherwise store the input datum as - * is. Since we don't use index_form_tuple in this AM, we have to make sure - * value to be inserted is not toasted; FormIndexDatum doesn't guarantee - * that. 
But we assume the "compress" method to return an untoasted value. + * If an optional "compress" method is provided, then call it to form the + * leaf datum from the input datum. Otherwise store the input datum as + * is. Since we don't use index_form_tuple in this AM, we have to make + * sure value to be inserted is not toasted; FormIndexDatum doesn't + * guarantee that. But we assume the "compress" method to return an + * untoasted value. */ if (!isnull) { diff --git a/src/backend/access/spgist/spgvalidate.c b/src/backend/access/spgist/spgvalidate.c index 8bbed7ff32..619c357115 100644 --- a/src/backend/access/spgist/spgvalidate.c +++ b/src/backend/access/spgist/spgvalidate.c @@ -53,7 +53,7 @@ spgvalidate(Oid opclassoid) OpFamilyOpFuncGroup *opclassgroup; int i; ListCell *lc; - spgConfigIn configIn; + spgConfigIn configIn; spgConfigOut configOut; Oid configOutLefttype = InvalidOid; Oid configOutRighttype = InvalidOid; @@ -119,9 +119,9 @@ spgvalidate(Oid opclassoid) configOutRighttype = procform->amprocrighttype; /* - * When leaf and attribute types are the same, compress function - * is not required and we set corresponding bit in functionset - * for later group consistency check. + * When leaf and attribute types are the same, compress + * function is not required and we set corresponding bit in + * functionset for later group consistency check. */ if (!OidIsValid(configOut.leafType) || configOut.leafType == configIn.attType) diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 5c05d545c4..cdd8156ce4 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -913,7 +913,7 @@ typedef struct TwoPhaseFileHeader bool initfileinval; /* does relcache init file need invalidation? 
*/ uint16 gidlen; /* length of the GID - GID follows the header */ XLogRecPtr origin_lsn; /* lsn of this record at origin node */ - TimestampTz origin_timestamp; /* time of prepare at origin node */ + TimestampTz origin_timestamp; /* time of prepare at origin node */ } TwoPhaseFileHeader; /* @@ -1065,7 +1065,7 @@ EndPrepare(GlobalTransaction gxact) { TwoPhaseFileHeader *hdr; StateFileChunk *record; - bool replorigin; + bool replorigin; /* Add the end sentinel to the list of 2PC records */ RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0, @@ -1317,7 +1317,7 @@ void ParsePrepareRecord(uint8 info, char *xlrec, xl_xact_parsed_prepare *parsed) { TwoPhaseFileHeader *hdr; - char *bufptr; + char *bufptr; hdr = (TwoPhaseFileHeader *) xlrec; bufptr = xlrec + MAXALIGN(sizeof(TwoPhaseFileHeader)); diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 4747353bb9..c38de0c5fe 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -3267,8 +3267,8 @@ bool IsInTransactionBlock(bool isTopLevel) { /* - * Return true on same conditions that would make PreventInTransactionBlock - * error out + * Return true on same conditions that would make + * PreventInTransactionBlock error out */ if (IsTransactionBlock()) return true; @@ -5448,9 +5448,9 @@ XactLogAbortRecord(TimestampTz abort_time, } /* dump transaction origin information only for abort prepared */ - if ( (replorigin_session_origin != InvalidRepOriginId) && - TransactionIdIsValid(twophase_xid) && - XLogLogicalInfoActive()) + if ((replorigin_session_origin != InvalidRepOriginId) && + TransactionIdIsValid(twophase_xid) && + XLogLogicalInfoActive()) { xl_xinfo.xinfo |= XACT_XINFO_HAS_ORIGIN; diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 08dc9ba031..c0923d97f2 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -10656,10 +10656,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, * Mark that start phase has correctly finished for an exclusive backup. * Session-level locks are updated as well to reflect that state. * - * Note that CHECK_FOR_INTERRUPTS() must not occur while updating - * backup counters and session-level lock. Otherwise they can be - * updated inconsistently, and which might cause do_pg_abort_backup() - * to fail. + * Note that CHECK_FOR_INTERRUPTS() must not occur while updating backup + * counters and session-level lock. Otherwise they can be updated + * inconsistently, and which might cause do_pg_abort_backup() to fail. */ if (exclusive) { @@ -10904,11 +10903,11 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) /* * Clean up session-level lock. * - * You might think that WALInsertLockRelease() can be called - * before cleaning up session-level lock because session-level - * lock doesn't need to be protected with WAL insertion lock. - * But since CHECK_FOR_INTERRUPTS() can occur in it, - * session-level lock must be cleaned up before it. + * You might think that WALInsertLockRelease() can be called before + * cleaning up session-level lock because session-level lock doesn't need + * to be protected with WAL insertion lock. But since + * CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be + * cleaned up before it. 
*/ sessionBackupState = SESSION_BACKUP_NONE; @@ -11042,6 +11041,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename); fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n", (uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename); + /* * Transfer remaining lines including label and start timeline to * history file. @@ -11259,7 +11259,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, bool *backupFromStandby) { char startxlogfilename[MAXFNAMELEN]; - TimeLineID tli_from_walseg, tli_from_file; + TimeLineID tli_from_walseg, + tli_from_file; FILE *lfp; char ch; char backuptype[20]; @@ -11322,13 +11323,13 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, } /* - * Parse START TIME and LABEL. Those are not mandatory fields for - * recovery but checking for their presence is useful for debugging - * and the next sanity checks. Cope also with the fact that the - * result buffers have a pre-allocated size, hence if the backup_label - * file has been generated with strings longer than the maximum assumed - * here an incorrect parsing happens. That's fine as only minor - * consistency checks are done afterwards. + * Parse START TIME and LABEL. Those are not mandatory fields for recovery + * but checking for their presence is useful for debugging and the next + * sanity checks. Cope also with the fact that the result buffers have a + * pre-allocated size, hence if the backup_label file has been generated + * with strings longer than the maximum assumed here an incorrect parsing + * happens. That's fine as only minor consistency checks are done + * afterwards. */ if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1) ereport(DEBUG1, @@ -11341,8 +11342,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, backuplabel, BACKUP_LABEL_FILE))); /* - * START TIMELINE is new as of 11. Its parsing is not mandatory, still - * use it as a sanity check if present. + * START TIMELINE is new as of 11. Its parsing is not mandatory, still use + * it as a sanity check if present. */ if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1) { diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 0ace1968df..578e4c6592 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -446,6 +446,7 @@ ExecuteGrantStmt(GrantStmt *stmt) switch (stmt->objtype) { case OBJECT_TABLE: + /* * Because this might be a sequence, we test both relation and * sequence bits, and later do a more limited test when we know @@ -3458,7 +3459,7 @@ aclcheck_error(AclResult aclerr, ObjectType objtype, case OBJECT_VIEW: msg = gettext_noop("permission denied for view %s"); break; - /* these currently aren't used */ + /* these currently aren't used */ case OBJECT_ACCESS_METHOD: case OBJECT_AMOP: case OBJECT_AMPROC: @@ -3583,11 +3584,13 @@ aclcheck_error(AclResult aclerr, ObjectType objtype, case OBJECT_TSDICTIONARY: msg = gettext_noop("must be owner of text search dictionary %s"); break; - /* - * Special cases: For these, the error message talks about - * "relation", because that's where the ownership is - * attached. See also check_object_ownership(). - */ + + /* + * Special cases: For these, the error message talks + * about "relation", because that's where the + * ownership is attached. See also + * check_object_ownership(). 
+ */ case OBJECT_COLUMN: case OBJECT_POLICY: case OBJECT_RULE: @@ -3595,7 +3598,7 @@ aclcheck_error(AclResult aclerr, ObjectType objtype, case OBJECT_TRIGGER: msg = gettext_noop("must be owner of relation %s"); break; - /* these currently aren't used */ + /* these currently aren't used */ case OBJECT_ACCESS_METHOD: case OBJECT_AMOP: case OBJECT_AMPROC: diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index c23cfdaf6b..4f1d365357 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -631,9 +631,9 @@ findDependentObjects(const ObjectAddress *object, * transform this deletion request into a delete of this * owning object. * - * For INTERNAL_AUTO dependencies, we don't enforce this; - * in other words, we don't follow the links back to the - * owning object. + * For INTERNAL_AUTO dependencies, we don't enforce this; in + * other words, we don't follow the links back to the owning + * object. */ if (foundDep->deptype == DEPENDENCY_INTERNAL_AUTO) break; diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index dec4265d68..6c40b29b3f 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -377,7 +377,7 @@ ConstructTupleDescriptor(Relation heapRelation, to->attislocal = true; to->attinhcount = 0; to->attcollation = (i < numkeyatts) ? - collationObjectId[i] : InvalidOid; + collationObjectId[i] : InvalidOid; } else { @@ -414,7 +414,7 @@ ConstructTupleDescriptor(Relation heapRelation, to->atttypmod = exprTypmod(indexkey); to->attislocal = true; to->attcollation = (i < numkeyatts) ? - collationObjectId[i] : InvalidOid; + collationObjectId[i] : InvalidOid; ReleaseSysCache(tuple); @@ -1023,21 +1023,21 @@ index_create(Relation heapRelation, } localaddr = index_constraint_create(heapRelation, - indexRelationId, - parentConstraintId, - indexInfo, - indexRelationName, - constraintType, - constr_flags, - allow_system_table_mods, - is_internal); + indexRelationId, + parentConstraintId, + indexInfo, + indexRelationName, + constraintType, + constr_flags, + allow_system_table_mods, + is_internal); if (constraintId) *constraintId = localaddr.objectId; } else { bool have_simple_col = false; - DependencyType deptype; + DependencyType deptype; deptype = OidIsValid(parentIndexRelid) ? DEPENDENCY_INTERNAL_AUTO : DEPENDENCY_AUTO; @@ -1340,12 +1340,12 @@ index_constraint_create(Relation heapRelation, recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL); /* - * Also, if this is a constraint on a partition, mark it as depending - * on the constraint in the parent. + * Also, if this is a constraint on a partition, mark it as depending on + * the constraint in the parent. 
*/ if (OidIsValid(parentConstraintId)) { - ObjectAddress parentConstr; + ObjectAddress parentConstr; ObjectAddressSet(parentConstr, ConstraintRelationId, parentConstraintId); recordDependencyOn(&referenced, &parentConstr, DEPENDENCY_INTERNAL_AUTO); @@ -1822,7 +1822,7 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2, Oid *opfamilies1, Oid *opfamilies2, AttrNumber *attmap, int maplen) { - int i; + int i; if (info1->ii_Unique != info2->ii_Unique) return false; @@ -1854,7 +1854,7 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2, /* ignore expressions at this stage */ if ((info1->ii_IndexAttrNumbers[i] != InvalidAttrNumber) && (attmap[info2->ii_IndexAttrNumbers[i] - 1] != - info1->ii_IndexAttrNumbers[i])) + info1->ii_IndexAttrNumbers[i])) return false; /* collation and opfamily is not valid for including columns */ @@ -1875,8 +1875,8 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2, return false; if (info1->ii_Expressions != NIL) { - bool found_whole_row; - Node *mapped; + bool found_whole_row; + Node *mapped; mapped = map_variable_attnos((Node *) info2->ii_Expressions, 1, 0, attmap, maplen, @@ -1899,8 +1899,8 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2, return false; if (info1->ii_Predicate != NULL) { - bool found_whole_row; - Node *mapped; + bool found_whole_row; + Node *mapped; mapped = map_variable_attnos((Node *) info2->ii_Predicate, 1, 0, attmap, maplen, @@ -2105,11 +2105,11 @@ index_update_stats(Relation rel, * It is safe to use a non-transactional update even though our * transaction could still fail before committing. Setting relhasindex * true is safe even if there are no indexes (VACUUM will eventually fix - * it). And of course the new relpages and - * reltuples counts are correct regardless. However, we don't want to - * change relpages (or relallvisible) if the caller isn't providing an - * updated reltuples count, because that would bollix the - * reltuples/relpages ratio which is what's really important. + * it). And of course the new relpages and reltuples counts are correct + * regardless. However, we don't want to change relpages (or + * relallvisible) if the caller isn't providing an updated reltuples + * count, because that would bollix the reltuples/relpages ratio which is + * what's really important. */ pg_class = heap_open(RelationRelationId, RowExclusiveLock); @@ -4136,7 +4136,7 @@ RestoreReindexState(void *reindexstate) { SerializedReindexState *sistate = (SerializedReindexState *) reindexstate; int c = 0; - MemoryContext oldcontext; + MemoryContext oldcontext; currentlyReindexedHeap = sistate->currentlyReindexedHeap; currentlyReindexedIndex = sistate->currentlyReindexedIndex; diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index cd96501cbb..ef3ea64bd0 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -2062,8 +2062,8 @@ pg_get_object_address(PG_FUNCTION_ARGS) } /* - * get_object_address is pretty sensitive to the length of its input lists; - * check that they're what it wants. + * get_object_address is pretty sensitive to the length of its input + * lists; check that they're what it wants. 
*/ switch (type) { @@ -5130,7 +5130,11 @@ get_relkind_objtype(char relkind) return OBJECT_MATVIEW; case RELKIND_FOREIGN_TABLE: return OBJECT_FOREIGN_TABLE; - /* other relkinds are not supported here because they don't map to OBJECT_* values */ + + /* + * other relkinds are not supported here because they don't map to + * OBJECT_* values + */ default: elog(ERROR, "unexpected relkind: %d", relkind); return 0; diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c index de801ad788..558022647c 100644 --- a/src/backend/catalog/partition.c +++ b/src/backend/catalog/partition.c @@ -205,7 +205,7 @@ map_partition_varattnos(List *expr, int fromrel_varno, bool has_partition_attrs(Relation rel, Bitmapset *attnums, bool *used_in_expr) { - PartitionKey key; + PartitionKey key; int partnatts; List *partexprs; ListCell *partexprs_item; diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index c5b5395791..7a6d158f89 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -419,8 +419,8 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) Relation pg_constraint; Relation parentRel; Relation rel; - ScanKeyData key; - SysScanDesc scan; + ScanKeyData key; + SysScanDesc scan; TupleDesc tupdesc; HeapTuple tuple; AttrNumber *attmap; @@ -448,7 +448,7 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) while ((tuple = systable_getnext(scan)) != NULL) { - Form_pg_constraint constrForm = (Form_pg_constraint) GETSTRUCT(tuple); + Form_pg_constraint constrForm = (Form_pg_constraint) GETSTRUCT(tuple); AttrNumber conkey[INDEX_MAX_KEYS]; AttrNumber mapped_conkey[INDEX_MAX_KEYS]; AttrNumber confkey[INDEX_MAX_KEYS]; @@ -573,8 +573,8 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) nelem, nelem, InvalidOid, /* not a domain constraint */ - constrForm->conindid, /* same index */ - constrForm->confrelid, /* same foreign rel */ + constrForm->conindid, /* same index */ + constrForm->confrelid, /* same foreign rel */ confkey, conpfeqop, conppeqop, @@ -606,8 +606,8 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) if (cloned) { /* - * Feed back caller about the constraints we created, so that they can - * set up constraint verification. + * Feed back caller about the constraints we created, so that they + * can set up constraint verification. 
*/ newc = palloc(sizeof(ClonedConstraint)); newc->relid = relationId; @@ -625,7 +625,7 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - PartitionDesc partdesc = RelationGetPartitionDesc(rel); + PartitionDesc partdesc = RelationGetPartitionDesc(rel); int i; for (i = 0; i < partdesc->nparts; i++) @@ -634,7 +634,7 @@ CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) cloned); } - heap_close(rel, NoLock); /* keep lock till commit */ + heap_close(rel, NoLock); /* keep lock till commit */ heap_close(parentRel, NoLock); heap_close(pg_constraint, RowShareLock); } @@ -1020,12 +1020,12 @@ AlterConstraintNamespaces(Oid ownerId, Oid oldNspId, void ConstraintSetParentConstraint(Oid childConstrId, Oid parentConstrId) { - Relation constrRel; + Relation constrRel; Form_pg_constraint constrForm; - HeapTuple tuple, - newtup; - ObjectAddress depender; - ObjectAddress referenced; + HeapTuple tuple, + newtup; + ObjectAddress depender; + ObjectAddress referenced; constrRel = heap_open(ConstraintRelationId, RowExclusiveLock); tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(childConstrId)); @@ -1212,8 +1212,8 @@ Oid get_relation_idx_constraint_oid(Oid relationId, Oid indexId) { Relation pg_constraint; - SysScanDesc scan; - ScanKeyData key; + SysScanDesc scan; + ScanKeyData key; HeapTuple tuple; Oid constraintId = InvalidOid; @@ -1228,7 +1228,7 @@ get_relation_idx_constraint_oid(Oid relationId, Oid indexId) true, NULL, 1, &key); while ((tuple = systable_getnext(scan)) != NULL) { - Form_pg_constraint constrForm; + Form_pg_constraint constrForm; constrForm = (Form_pg_constraint) GETSTRUCT(tuple); if (constrForm->conindid == indexId) diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c index 6160804ef8..85baca54cc 100644 --- a/src/backend/catalog/pg_inherits.c +++ b/src/backend/catalog/pg_inherits.c @@ -448,7 +448,7 @@ StoreSingleInheritance(Oid relationId, Oid parentOid, int32 seqNumber) bool DeleteInheritsTuple(Oid inhrelid, Oid inhparent) { - bool found = false; + bool found = false; Relation catalogRelation; ScanKeyData key; SysScanDesc scan; diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 0d63866fb0..eff325cc7d 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -942,7 +942,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) /* Superusers can bypass permission checks */ if (!superuser()) { - ObjectType objtype = get_object_type(classId, objectId); + ObjectType objtype = get_object_type(classId, objectId); /* must be owner */ if (!has_privs_of_role(GetUserId(), old_ownerId)) diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index d088dc11a6..482d463420 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -1539,8 +1539,8 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, frozenXid, cutoffMulti, mapped_tables); /* - * If it's a system catalog, queue a sinval message to flush all - * catcaches on the catalog when we reach CommandCounterIncrement. + * If it's a system catalog, queue a sinval message to flush all catcaches + * on the catalog when we reach CommandCounterIncrement. 
*/ if (is_system_catalog) CacheInvalidateCatalog(OIDOldHeap); diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 99479eed66..770c75fe2c 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -2783,7 +2783,7 @@ CopyFrom(CopyState cstate) slot, NULL); - if (slot == NULL) /* "do nothing" */ + if (slot == NULL) /* "do nothing" */ goto next_tuple; /* FDW might have changed tuple */ diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 45baf7c13f..eecc85d14e 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -2184,7 +2184,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) "GRANT" : "REVOKE"); /* object_type */ values[i++] = CStringGetTextDatum(stringify_grant_objtype( - cmd->d.grant.istmt->objtype)); + cmd->d.grant.istmt->objtype)); /* schema */ nulls[i++] = true; /* identity */ @@ -2244,7 +2244,7 @@ stringify_grant_objtype(ObjectType objtype) return "TABLESPACE"; case OBJECT_TYPE: return "TYPE"; - /* these currently aren't used */ + /* these currently aren't used */ case OBJECT_ACCESS_METHOD: case OBJECT_AGGREGATE: case OBJECT_AMOP: @@ -2326,7 +2326,7 @@ stringify_adefprivs_objtype(ObjectType objtype) return "TABLESPACES"; case OBJECT_TYPE: return "TYPES"; - /* these currently aren't used */ + /* these currently aren't used */ case OBJECT_ACCESS_METHOD: case OBJECT_AGGREGATE: case OBJECT_AMOP: diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 3c74873eeb..cc229bbecf 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -305,7 +305,7 @@ interpret_function_parameter_list(ParseState *pstate, { if (objtype == OBJECT_PROCEDURE) *requiredResultType = RECORDOID; - else if (outCount == 0) /* save first output param's type */ + else if (outCount == 0) /* save first output param's type */ *requiredResultType = toid; outCount++; } diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index dda0dcb8aa..c4aa4c0974 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -326,7 +326,7 @@ DefineIndex(Oid relationId, IndexStmt *stmt, Oid indexRelationId, Oid parentIndexId, - Oid parentConstraintId, + Oid parentConstraintId, bool is_alter_table, bool check_rights, bool check_not_in_use, @@ -381,11 +381,11 @@ DefineIndex(Oid relationId, /* * Calculate the new list of index columns including both key columns and - * INCLUDE columns. Later we can determine which of these are key columns, - * and which are just part of the INCLUDE list by checking the list - * position. A list item in a position less than ii_NumIndexKeyAttrs is - * part of the key columns, and anything equal to and over is part of the - * INCLUDE columns. + * INCLUDE columns. Later we can determine which of these are key + * columns, and which are just part of the INCLUDE list by checking the + * list position. A list item in a position less than ii_NumIndexKeyAttrs + * is part of the key columns, and anything equal to and over is part of + * the INCLUDE columns. */ allIndexParams = list_concat(list_copy(stmt->indexParams), list_copy(stmt->indexIncludingParams)); @@ -431,6 +431,7 @@ DefineIndex(Oid relationId, /* OK */ break; case RELKIND_FOREIGN_TABLE: + /* * Custom error message for FOREIGN TABLE since the term is close * to a regular table and can confuse the user. 
@@ -691,13 +692,13 @@ DefineIndex(Oid relationId, * partition-local index can enforce global uniqueness iff the PK * value completely determines the partition that a row is in. * - * Thus, verify that all the columns in the partition key appear - * in the unique key definition. + * Thus, verify that all the columns in the partition key appear in + * the unique key definition. */ for (i = 0; i < key->partnatts; i++) { - bool found = false; - int j; + bool found = false; + int j; const char *constraint_type; if (stmt->primary) @@ -722,7 +723,7 @@ DefineIndex(Oid relationId, errmsg("unsupported %s constraint with partition key definition", constraint_type), errdetail("%s constraints cannot be used when partition keys include expressions.", - constraint_type))); + constraint_type))); for (j = 0; j < indexInfo->ii_NumIndexAttrs; j++) { @@ -820,8 +821,8 @@ DefineIndex(Oid relationId, /* * Make the catalog entries for the index, including constraints. This * step also actually builds the index, except if caller requested not to - * or in concurrent mode, in which case it'll be done later, or - * doing a partitioned index (because those don't have storage). + * or in concurrent mode, in which case it'll be done later, or doing a + * partitioned index (because those don't have storage). */ flags = constr_flags = 0; if (stmt->isconstraint) @@ -871,8 +872,8 @@ DefineIndex(Oid relationId, if (partitioned) { /* - * Unless caller specified to skip this step (via ONLY), process - * each partition to make sure they all contain a corresponding index. + * Unless caller specified to skip this step (via ONLY), process each + * partition to make sure they all contain a corresponding index. * * If we're called internally (no stmt->relation), recurse always. */ @@ -904,13 +905,13 @@ DefineIndex(Oid relationId, */ for (i = 0; i < nparts; i++) { - Oid childRelid = part_oids[i]; - Relation childrel; - List *childidxs; - ListCell *cell; + Oid childRelid = part_oids[i]; + Relation childrel; + List *childidxs; + ListCell *cell; AttrNumber *attmap; - bool found = false; - int maplen; + bool found = false; + int maplen; childrel = heap_open(childRelid, lockmode); childidxs = RelationGetIndexList(childrel); @@ -940,7 +941,7 @@ DefineIndex(Oid relationId, opfamOids, attmap, maplen)) { - Oid cldConstrOid = InvalidOid; + Oid cldConstrOid = InvalidOid; /* * Found a match. @@ -1002,7 +1003,7 @@ DefineIndex(Oid relationId, childStmt->idxname = NULL; childStmt->relationId = childRelid; DefineIndex(childRelid, childStmt, - InvalidOid, /* no predefined OID */ + InvalidOid, /* no predefined OID */ indexRelationId, /* this is our child */ createdConstraintId, is_alter_table, check_rights, check_not_in_use, @@ -1014,9 +1015,8 @@ DefineIndex(Oid relationId, /* * The pg_index row we inserted for this index was marked - * indisvalid=true. But if we attached an existing index that - * is invalid, this is incorrect, so update our row to - * invalid too. + * indisvalid=true. But if we attached an existing index that is + * invalid, this is incorrect, so update our row to invalid too. 
*/ if (invalidate_parent) { @@ -1479,7 +1479,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo, } else { - indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */ + indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */ indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions, expr); @@ -1505,7 +1505,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo, typeOidP[attn] = atttype; /* - * Included columns have no collation, no opclass and no ordering options. + * Included columns have no collation, no opclass and no ordering + * options. */ if (attn >= nkeycols) { @@ -2465,8 +2466,8 @@ void IndexSetParentIndex(Relation partitionIdx, Oid parentOid) { Relation pg_inherits; - ScanKeyData key[2]; - SysScanDesc scan; + ScanKeyData key[2]; + SysScanDesc scan; Oid partRelid = RelationGetRelid(partitionIdx); HeapTuple tuple; bool fix_dependencies; @@ -2496,15 +2497,15 @@ IndexSetParentIndex(Relation partitionIdx, Oid parentOid) if (parentOid == InvalidOid) { /* - * No pg_inherits row, and no parent wanted: nothing to do in - * this case. + * No pg_inherits row, and no parent wanted: nothing to do in this + * case. */ fix_dependencies = false; } else { - Datum values[Natts_pg_inherits]; - bool isnull[Natts_pg_inherits]; + Datum values[Natts_pg_inherits]; + bool isnull[Natts_pg_inherits]; /* * No pg_inherits row exists, and we want a parent for this index, @@ -2525,7 +2526,7 @@ IndexSetParentIndex(Relation partitionIdx, Oid parentOid) } else { - Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple); + Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple); if (parentOid == InvalidOid) { @@ -2572,14 +2573,14 @@ IndexSetParentIndex(Relation partitionIdx, Oid parentOid) if (OidIsValid(parentOid)) { - ObjectAddress parentIdx; + ObjectAddress parentIdx; ObjectAddressSet(parentIdx, RelationRelationId, parentOid); recordDependencyOn(&partIdx, &parentIdx, DEPENDENCY_INTERNAL_AUTO); } else { - ObjectAddress partitionTbl; + ObjectAddress partitionTbl; ObjectAddressSet(partitionTbl, RelationRelationId, partitionIdx->rd_index->indrelid); diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c index 8a2aa453ec..71278b38cf 100644 --- a/src/backend/commands/lockcmds.c +++ b/src/backend/commands/lockcmds.c @@ -181,7 +181,7 @@ typedef struct bool nowait; /* no wait mode */ Oid viewowner; /* view owner for checking the privilege */ Oid viewoid; /* OID of the view to be locked */ - List *ancestor_views; /* OIDs of ancestor views */ + List *ancestor_views; /* OIDs of ancestor views */ } LockViewRecurse_context; static bool diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index b2b845613d..cee0ef915b 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -215,7 +215,7 @@ RelationBuildRowSecurity(Relation relation) HeapTuple tuple; MemoryContextCopyAndSetIdentifier(rscxt, - RelationGetRelationName(relation)); + RelationGetRelationName(relation)); rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc)); rsdesc->rscxt = rscxt; diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index 73821502ba..568499761f 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -450,9 +450,9 @@ PersistHoldablePortal(Portal portal) PopActiveSnapshot(); /* - * We can now release any subsidiary memory of the portal's context; - * we'll never use it again. 
The executor already dropped its context, - * but this will clean up anything that glommed onto the portal's context via + * We can now release any subsidiary memory of the portal's context; we'll + * never use it again. The executor already dropped its context, but this + * will clean up anything that glommed onto the portal's context via * PortalContext. */ MemoryContextDeleteChildren(portal->portalContext); diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index c4adfd569e..3bb0d24cd2 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -133,7 +133,8 @@ CreateStatistics(CreateStatsStmt *stmt) * If the node has a name, split it up and determine creation namespace. * If not (a possibility not considered by the grammar, but one which can * occur via the "CREATE TABLE ... (LIKE)" command), then we put the - * object in the same namespace as the relation, and cons up a name for it. + * object in the same namespace as the relation, and cons up a name for + * it. */ if (stmt->defnames) namespaceId = QualifiedNameGetCreationNamespace(stmt->defnames, @@ -462,7 +463,7 @@ ChooseExtendedStatisticName(const char *name1, const char *name2, for (;;) { - Oid existingstats; + Oid existingstats; stxname = makeObjectName(name1, name2, modlabel); @@ -500,7 +501,7 @@ ChooseExtendedStatisticNameAddition(List *exprs) buf[0] = '\0'; foreach(lc, exprs) { - ColumnRef *cref = (ColumnRef *) lfirst(lc); + ColumnRef *cref = (ColumnRef *) lfirst(lc); const char *name; /* It should be one of these, but just skip if it happens not to be */ diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 2c23371a19..0e95037dcf 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -1634,7 +1634,8 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, } /* - * Write a WAL record to allow this set of actions to be logically decoded. + * Write a WAL record to allow this set of actions to be logically + * decoded. * * Assemble an array of relids so we can write a single WAL record for the * whole action. @@ -1648,7 +1649,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, Assert(XLogLogicalInfoActive()); logrelids = palloc(list_length(relids_logged) * sizeof(Oid)); - foreach (cell, relids_logged) + foreach(cell, relids_logged) logrelids[i++] = lfirst_oid(cell); xlrec.dbId = MyDatabaseId; @@ -5560,8 +5561,8 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, CommandCounterIncrement(); /* - * Did the request for a missing value work? If not we'll have to do - * a rewrite + * Did the request for a missing value work? If not we'll have to do a + * rewrite */ if (!rawEnt->missingMode) tab->rewrite |= AT_REWRITE_DEFAULT_VAL; @@ -7664,9 +7665,9 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, ObjectAddressSet(address, ConstraintRelationId, constrOid); /* - * Create the triggers that will enforce the constraint. We only want - * the action triggers to appear for the parent partitioned relation, - * even though the constraints also exist below. + * Create the triggers that will enforce the constraint. We only want the + * action triggers to appear for the parent partitioned relation, even + * though the constraints also exist below. 
*/ createForeignKeyTriggers(rel, RelationGetRelid(pkrel), fkconstraint, constrOid, indexOid, !recursing); @@ -8793,8 +8794,8 @@ createForeignKeyTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint, indexOid); /* - * For the referencing side, create the check triggers. We only need these - * on the partitions. + * For the referencing side, create the check triggers. We only need + * these on the partitions. */ if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) createForeignKeyCheckTriggers(RelationGetRelid(rel), refRelOid, @@ -13974,8 +13975,9 @@ QueuePartitionConstraintValidation(List **wqueue, Relation scanrel, } /* - * Constraints proved insufficient. For plain relations, queue a validation - * item now; for partitioned tables, recurse to process each partition. + * Constraints proved insufficient. For plain relations, queue a + * validation item now; for partitioned tables, recurse to process each + * partition. */ if (scanrel->rd_rel->relkind == RELKIND_RELATION) { @@ -14300,9 +14302,9 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) /* * If we're attaching a partition other than the default partition and a * default one exists, then that partition's partition constraint changes, - * so add an entry to the work queue to validate it, too. (We must not - * do this when the partition being attached is the default one; we - * already did it above!) + * so add an entry to the work queue to validate it, too. (We must not do + * this when the partition being attached is the default one; we already + * did it above!) */ if (OidIsValid(defaultPartOid)) { @@ -14408,8 +14410,8 @@ AttachPartitionEnsureIndexes(Relation rel, Relation attachrel) */ for (i = 0; i < list_length(attachRelIdxs); i++) { - Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]); - Oid cldConstrOid = InvalidOid; + Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]); + Oid cldConstrOid = InvalidOid; /* does this index have a parent? if so, can't use it */ if (attachrelIdxRels[i]->rd_rel->relispartition) @@ -14693,7 +14695,7 @@ ATExecDetachPartition(Relation rel, RangeVar *name) continue; Assert((IndexGetRelation(get_partition_parent(idxid), false) == - RelationGetRelid(rel))); + RelationGetRelid(rel))); idx = index_open(idxid, AccessExclusiveLock); IndexSetParentIndex(idx, InvalidOid); @@ -14722,9 +14724,9 @@ ATExecDetachPartition(Relation rel, RangeVar *name) */ struct AttachIndexCallbackState { - Oid partitionOid; - Oid parentTblOid; - bool lockedParentTbl; + Oid partitionOid; + Oid parentTblOid; + bool lockedParentTbl; }; static void @@ -14836,7 +14838,8 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name) cldConstrId = InvalidOid; /* - * If this partition already has an index attached, refuse the operation. + * If this partition already has an index attached, refuse the + * operation. */ refuseDupeIndexAttach(parentIdx, partIdx, partTbl); @@ -14890,8 +14893,8 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name) errdetail("The index definitions do not match."))); /* - * If there is a constraint in the parent, make sure there is one - * in the child too. + * If there is a constraint in the parent, make sure there is one in + * the child too. 
*/ constraintOid = get_relation_idx_constraint_oid(RelationGetRelid(parentTbl), RelationGetRelid(parentIdx)); @@ -14907,9 +14910,9 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name) RelationGetRelationName(partIdx), RelationGetRelationName(parentIdx)), errdetail("The index \"%s\" belongs to a constraint in table \"%s\" but no constraint exists for index \"%s\".", - RelationGetRelationName(parentIdx), - RelationGetRelationName(parentTbl), - RelationGetRelationName(partIdx)))); + RelationGetRelationName(parentIdx), + RelationGetRelationName(parentTbl), + RelationGetRelationName(partIdx)))); } /* All good -- do it */ @@ -14938,10 +14941,10 @@ ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name) static void refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTbl) { - Relation pg_inherits; - ScanKeyData key; - HeapTuple tuple; - SysScanDesc scan; + Relation pg_inherits; + ScanKeyData key; + HeapTuple tuple; + SysScanDesc scan; pg_inherits = heap_open(InheritsRelationId, AccessShareLock); ScanKeyInit(&key, Anum_pg_inherits_inhparent, @@ -14951,7 +14954,7 @@ refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTb NULL, 1, &key); while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - Form_pg_inherits inhForm; + Form_pg_inherits inhForm; Oid tab; inhForm = (Form_pg_inherits) GETSTRUCT(tuple); @@ -14979,12 +14982,12 @@ refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTb static void validatePartitionedIndex(Relation partedIdx, Relation partedTbl) { - Relation inheritsRel; - SysScanDesc scan; - ScanKeyData key; - int tuples = 0; - HeapTuple inhTup; - bool updated = false; + Relation inheritsRel; + SysScanDesc scan; + ScanKeyData key; + int tuples = 0; + HeapTuple inhTup; + bool updated = false; Assert(partedIdx->rd_rel->relkind == RELKIND_PARTITIONED_INDEX); @@ -15002,11 +15005,11 @@ validatePartitionedIndex(Relation partedIdx, Relation partedTbl) while ((inhTup = systable_getnext(scan)) != NULL) { Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(inhTup); - HeapTuple indTup; - Form_pg_index indexForm; + HeapTuple indTup; + Form_pg_index indexForm; indTup = SearchSysCache1(INDEXRELID, - ObjectIdGetDatum(inhForm->inhrelid)); + ObjectIdGetDatum(inhForm->inhrelid)); if (!indTup) elog(ERROR, "cache lookup failed for index %u", inhForm->inhrelid); diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 02d2a0ffd7..88a95896b6 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -5741,8 +5741,9 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, * oldtup should be non-NULL, whereas for UPDATE events normally both * oldtup and newtup are non-NULL. But for UPDATE events fired for * capturing transition tuples during UPDATE partition-key row - * movement, oldtup is NULL when the event is for a row being inserted, - * whereas newtup is NULL when the event is for a row being deleted. + * movement, oldtup is NULL when the event is for a row being + * inserted, whereas newtup is NULL when the event is for a row being + * deleted. 
*/ Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table && oldtup == NULL)); @@ -5769,7 +5770,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, } if (newtup != NULL && ((event == TRIGGER_EVENT_INSERT && insert_new_table) || - (event == TRIGGER_EVENT_UPDATE && update_new_table))) + (event == TRIGGER_EVENT_UPDATE && update_new_table))) { Tuplestorestate *new_tuplestore; @@ -5791,9 +5792,9 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, /* * If transition tables are the only reason we're here, return. As * mentioned above, we can also be here during update tuple routing in - * presence of transition tables, in which case this function is called - * separately for oldtup and newtup, so we expect exactly one of them - * to be NULL. + * presence of transition tables, in which case this function is + * called separately for oldtup and newtup, so we expect exactly one + * of them to be NULL. */ if (trigdesc == NULL || (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) || diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index e530b262da..9d6e25aae5 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -2200,7 +2200,7 @@ ExecEvalFuncExprFusage(ExprState *state, ExprEvalStep *op, */ void ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op, - ExprContext *econtext) + ExprContext *econtext) { FunctionCallInfo fcinfo = op->d.func.fcinfo_data; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index ad8eca0a9d..51d5bd01d3 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1417,6 +1417,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) rInfo++; nr--; } + /* * Third, search through the result relations that were created during * tuple routing, if any. diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index a3fb4495d2..8b3663b3c9 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -407,10 +407,9 @@ ExecSetExecProcNode(PlanState *node, ExecProcNodeMtd function) { /* * Add a wrapper around the ExecProcNode callback that checks stack depth - * during the first execution and maybe adds an instrumentation - * wrapper. When the callback is changed after execution has already begun - * that means we'll superfluously execute ExecProcNodeFirst, but that seems - * ok. + * during the first execution and maybe adds an instrumentation wrapper. + * When the callback is changed after execution has already begun that + * means we'll superfluously execute ExecProcNodeFirst, but that seems ok. 
*/ node->ExecProcNodeReal = function; node->ExecProcNode = ExecProcNodeFirst; diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index d14bf2ad69..0beb7f80be 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -674,7 +674,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot) if (HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) < slot->tts_tupleDescriptor->natts) { - HeapTuple tuple; + HeapTuple tuple; MemoryContext oldContext = MemoryContextSwitchTo(slot->tts_mcxt); tuple = heap_expand_tuple(slot->tts_tuple, diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 1b1334006f..7624a3ac6e 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -2365,7 +2365,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* for each grouping set */ for (i = 0; i < phasedata->numsets; i++) { - int length = phasedata->gset_lengths[i]; + int length = phasedata->gset_lengths[i]; if (phasedata->eqfunctions[length - 1] != NULL) continue; diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index eaf7d2d563..cdc9c51bd1 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -268,7 +268,7 @@ gather_getnext(GatherState *gatherstate) if (gatherstate->need_to_scan_locally) { - EState *estate = gatherstate->ps.state; + EState *estate = gatherstate->ps.state; /* Install our DSA area while executing the plan. */ estate->es_query_dsa = diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c index 83221cdbae..a0b3334bed 100644 --- a/src/backend/executor/nodeGatherMerge.c +++ b/src/backend/executor/nodeGatherMerge.c @@ -628,7 +628,7 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait) { PlanState *outerPlan = outerPlanState(gm_state); TupleTableSlot *outerTupleSlot; - EState *estate = gm_state->ps.state; + EState *estate = gm_state->ps.state; /* Install our DSA area while executing the plan. */ estate->es_query_dsa = gm_state->pei ? 
gm_state->pei->area : NULL; diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index ab91eb2527..dd94cffbd1 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -596,7 +596,8 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) List *lclauses; List *rclauses; List *hoperators; - TupleDesc outerDesc, innerDesc; + TupleDesc outerDesc, + innerDesc; ListCell *l; /* check for unsupported flags */ diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index f3cbe2f889..5e52b90c00 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -1436,7 +1436,8 @@ MergeJoinState * ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) { MergeJoinState *mergestate; - TupleDesc outerDesc, innerDesc; + TupleDesc outerDesc, + innerDesc; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 7ec2c6bcaa..f6482f8411 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -1088,7 +1088,7 @@ lreplace:; */ ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate, estate, &tuple_deleted, false, - false /* canSetTag */, true /* changingPart */); + false /* canSetTag */ , true /* changingPart */ ); /* * For some reason if DELETE didn't happen (e.g. trigger prevented @@ -1678,8 +1678,8 @@ ExecPrepareTupleRouting(ModifyTableState *mtstate, HeapTuple tuple; /* - * Determine the target partition. If ExecFindPartition does not find - * a partition after all, it doesn't return here; otherwise, the returned + * Determine the target partition. If ExecFindPartition does not find a + * partition after all, it doesn't return here; otherwise, the returned * value is to be used as an index into the arrays for the ResultRelInfo * and TupleConversionMap for the partition. */ @@ -2140,7 +2140,7 @@ ExecModifyTable(PlanState *pstate) slot = ExecDelete(node, tupleid, oldtuple, planSlot, &node->mt_epqstate, estate, NULL, true, node->canSetTag, - false /* changingPart */); + false /* changingPart */ ); break; default: elog(ERROR, "unknown operation"); @@ -2310,7 +2310,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && (operation == CMD_INSERT || update_tuple_routing_needed)) mtstate->mt_partition_tuple_routing = - ExecSetupPartitionTupleRouting(mtstate, rel); + ExecSetupPartitionTupleRouting(mtstate, rel); /* * Build state for collecting transition tuples. This requires having a diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c index 872d6e5735..15177dbed7 100644 --- a/src/backend/executor/nodeSamplescan.c +++ b/src/backend/executor/nodeSamplescan.c @@ -153,8 +153,8 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) RelationGetDescr(scanstate->ss.ss_currentRelation)); /* - * Initialize result slot, type and projection. - * tuple table and result tuple initialization + * Initialize result slot, type and projection. 
tuple table and result + * tuple initialization */ ExecInitResultTupleSlotTL(estate, &scanstate->ss.ps); ExecAssignScanProjectionInfo(&scanstate->ss); diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index 73f16c9aba..0d2acb665a 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -214,8 +214,8 @@ ExecInitSort(Sort *node, EState *estate, int eflags) ExecCreateScanSlotFromOuterPlan(estate, &sortstate->ss); /* - * Initialize return slot and type. No need to initialize projection info because - * this node doesn't do projections. + * Initialize return slot and type. No need to initialize projection info + * because this node doesn't do projections. */ ExecInitResultTupleSlotTL(estate, &sortstate->ss.ps); sortstate->ss.ps.ps_ProjInfo = NULL; diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index d5411500a2..44f551bcf1 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -974,7 +974,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) /* * Create comparator for lookups of rows in the table (potentially - * across-type comparison). + * across-type comparison). */ sstate->cur_eq_comp = ExecBuildGroupingEqual(tupDescLeft, tupDescRight, ncols, diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c index 6ec087b968..f76999d40a 100644 --- a/src/backend/executor/nodeValuesscan.c +++ b/src/backend/executor/nodeValuesscan.c @@ -131,8 +131,8 @@ ValuesNext(ValuesScanState *node) node->ss.ps.subPlan = NIL; /* - * As the expressions are only ever used once, disable JIT for - * them. This is worthwhile because it's common to insert significant + * As the expressions are only ever used once, disable JIT for them. + * This is worthwhile because it's common to insert significant * amounts of data via VALUES(). */ saved_jit_flags = econtext->ecxt_estate->es_jit_flags; diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c index f37ff826c9..c8ec4be852 100644 --- a/src/backend/jit/llvm/llvmjit_expr.c +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -2019,8 +2019,8 @@ llvm_compile_expr(ExprState *state) isnull; /* - * At this point aggref->wfuncno is not yet set (it's - * set up in ExecInitWindowAgg() after initializing the + * At this point aggref->wfuncno is not yet set (it's set + * up in ExecInitWindowAgg() after initializing the * expression). So load it from memory each time round. */ v_wfuncnop = l_ptr_const(&wfunc->wfuncno, diff --git a/src/backend/lib/bloomfilter.c b/src/backend/lib/bloomfilter.c index 3565480d13..1a8bc2c36c 100644 --- a/src/backend/lib/bloomfilter.c +++ b/src/backend/lib/bloomfilter.c @@ -262,7 +262,8 @@ static void k_hashes(bloom_filter *filter, uint32 *hashes, unsigned char *elem, size_t len) { uint64 hash; - uint32 x, y; + uint32 x, + y; uint64 m; int i; diff --git a/src/backend/libpq/be-secure-common.c b/src/backend/libpq/be-secure-common.c index 2389e5668f..a3edf27e86 100644 --- a/src/backend/libpq/be-secure-common.c +++ b/src/backend/libpq/be-secure-common.c @@ -130,7 +130,7 @@ bool check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart) { int loglevel = isServerStart ? 
FATAL : LOG; - struct stat buf; + struct stat buf; if (stat(ssl_key_file, &buf) != 0) { diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 54cb352b8f..48b468f62f 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -125,6 +125,7 @@ be_tls_init(bool isServerStart) if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload) SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb); else + /* * If reloading and no external command is configured, override * OpenSSL's default handling of passphrase-protected files, @@ -1139,8 +1140,8 @@ be_tls_get_certificate_hash(Port *port, size_t *len) return NULL; /* - * Get the signature algorithm of the certificate to determine the - * hash algorithm to use for the result. + * Get the signature algorithm of the certificate to determine the hash + * algorithm to use for the result. */ if (!OBJ_find_sigid_algs(X509_get_signature_nid(server_cert), &algo_nid, NULL)) diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index 81182f2518..9bf9a29d6b 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -1168,6 +1168,7 @@ bms_prev_member(const Bitmapset *a, int prevbit) { int result; int shift = BITS_PER_BITMAPWORD - 8; + result = wordnum * BITS_PER_BITMAPWORD; while ((w >> shift) == 0) diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c index d3c742693b..a775f9120e 100644 --- a/src/backend/nodes/read.c +++ b/src/backend/nodes/read.c @@ -216,9 +216,9 @@ nodeTokenType(char *token, int length) { /* * Yes. Figure out whether it is integral or float; this requires - * both a syntax check and a range check. strtoint() can do both for us. - * We know the token will end at a character that strtoint will stop at, - * so we do not need to modify the string. + * both a syntax check and a range check. strtoint() can do both for + * us. We know the token will end at a character that strtoint will + * stop at, so we do not need to modify the string. */ char *endptr; diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index afc663cfd8..477b9f7fb8 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -963,10 +963,10 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * We need attr_needed data for building targetlist of a join * relation representing join between matching partitions for - * partitionwise join. A given attribute of a child will be - * needed in the same highest joinrel where the corresponding - * attribute of parent is needed. Hence it suffices to use the - * same Relids set for parent and child. + * partitionwise join. A given attribute of a child will be needed + * in the same highest joinrel where the corresponding attribute + * of parent is needed. Hence it suffices to use the same Relids + * set for parent and child. */ for (attno = rel->min_attr; attno <= rel->max_attr; attno++) { @@ -2742,11 +2742,10 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) join_search_one_level(root, lev); /* - * Run generate_partitionwise_join_paths() and - * generate_gather_paths() for each just-processed joinrel. We could - * not do this earlier because both regular and partial paths can get - * added to a particular joinrel at multiple times within - * join_search_one_level. + * Run generate_partitionwise_join_paths() and generate_gather_paths() + * for each just-processed joinrel. 
We could not do this earlier + * because both regular and partial paths can get added to a + * particular joinrel at multiple times within join_search_one_level. * * After that, we're done creating paths for the joinrel, so run * set_cheapest(). diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index 07d55a59ad..f295558f76 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -2696,6 +2696,7 @@ match_clause_to_ordering_op(IndexOptInfo *index, opfamily = index->opfamily[indexcol]; idxcollation = index->indexcollations[indexcol]; + /* * Clause must be a binary opclause. */ @@ -3945,7 +3946,7 @@ adjust_rowcompare_for_index(RowCompareExpr *clause, IndexCollMatchesExprColl(index->indexcollations[i], lfirst_oid(collids_cell))) - break; + break; } if (i >= index->ncolumns) break; /* no match found */ diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 6d41d307ea..7008e1318e 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -41,9 +41,9 @@ static void populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, RelOptInfo *joinrel, SpecialJoinInfo *sjinfo, List *restrictlist); static void try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, - RelOptInfo *rel2, RelOptInfo *joinrel, - SpecialJoinInfo *parent_sjinfo, - List *parent_restrictlist); + RelOptInfo *rel2, RelOptInfo *joinrel, + SpecialJoinInfo *parent_sjinfo, + List *parent_restrictlist); static int match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel, bool strict_op); @@ -1309,8 +1309,8 @@ restriction_is_constant_false(List *restrictlist, */ static void try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, - RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo, - List *parent_restrictlist) + RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo, + List *parent_restrictlist) { int nparts; int cnt_parts; @@ -1338,8 +1338,8 @@ try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, joinrel->part_scheme == rel2->part_scheme); /* - * Since we allow partitionwise join only when the partition bounds of - * the joining relations exactly match, the partition bounds of the join + * Since we allow partitionwise join only when the partition bounds of the + * joining relations exactly match, the partition bounds of the join * should match those of the joining relations. */ Assert(partition_bounds_equal(joinrel->part_scheme->partnatts, diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 24e6ee026e..67a2c7a581 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -6797,10 +6797,10 @@ apply_scanjoin_target_to_paths(PlannerInfo *root, { /* * Since we can't generate the final scan/join target, this is our - * last opportunity to use any partial paths that exist. We don't - * do this if the case where the target is parallel-safe, since we - * will be able to generate superior paths by doing it after the - * final scan/join target has been applied. + * last opportunity to use any partial paths that exist. We don't do + * this if the case where the target is parallel-safe, since we will + * be able to generate superior paths by doing it after the final + * scan/join target has been applied. 
* * Note that this may invalidate rel->cheapest_total_path, so we must * not rely on it after this point without first calling set_cheapest. diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index 61d0770f10..0ab4014be6 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -1688,9 +1688,9 @@ expand_partitioned_rtentry(PlannerInfo *root, RangeTblEntry *parentrte, /* * Note down whether any partition key cols are being updated. Though it's * the root partitioned table's updatedCols we are interested in, we - * instead use parentrte to get the updatedCols. This is convenient because - * parentrte already has the root partrel's updatedCols translated to match - * the attribute ordering of parentrel. + * instead use parentrte to get the updatedCols. This is convenient + * because parentrte already has the root partrel's updatedCols translated + * to match the attribute ordering of parentrel. */ if (!root->partColsUpdated) root->partColsUpdated = diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 6973fe3458..8369e3ad62 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -1421,6 +1421,7 @@ relation_excluded_by_constraints(PlannerInfo *root, switch (constraint_exclusion) { case CONSTRAINT_EXCLUSION_OFF: + /* * Don't prune if feature turned off -- except if the relation is * a partition. While partprune.c-style partition pruning is not @@ -1435,6 +1436,7 @@ relation_excluded_by_constraints(PlannerInfo *root, return false; case CONSTRAINT_EXCLUSION_PARTITION: + /* * When constraint_exclusion is set to 'partition' we only handle * OTHER_MEMBER_RELs, or BASERELs in cases where the result target @@ -1444,11 +1446,11 @@ relation_excluded_by_constraints(PlannerInfo *root, !(rel->reloptkind == RELOPT_BASEREL && root->inhTargetKind != INHKIND_NONE && rel->relid == root->parse->resultRelation)) - return false; + return false; break; case CONSTRAINT_EXCLUSION_ON: - break; /* always try to exclude */ + break; /* always try to exclude */ } /* diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 0c66ea1dfc..05f57591e4 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -77,7 +77,7 @@ static Query *transformExplainStmt(ParseState *pstate, static Query *transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt); static Query *transformCallStmt(ParseState *pstate, - CallStmt *stmt); + CallStmt *stmt); static void transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, bool pushedDown); #ifdef RAW_EXPRESSION_COVERAGE_TEST diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index c6f3628def..da5ede866c 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -484,10 +484,10 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, cxt->blist = lappend(cxt->blist, seqstmt); /* - * Store the identity sequence name that we decided on. ALTER TABLE - * ... ADD COLUMN ... IDENTITY needs this so that it can fill the new - * column with values from the sequence, while the association of the - * sequence with the table is not set until after the ALTER TABLE. + * Store the identity sequence name that we decided on. ALTER TABLE ... + * ADD COLUMN ... 
IDENTITY needs this so that it can fill the new column + * with values from the sequence, while the association of the sequence + * with the table is not set until after the ALTER TABLE. */ column->identitySequence = seqstmt->sequence; @@ -1193,14 +1193,14 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla */ if (table_like_clause->options & CREATE_TABLE_LIKE_STATISTICS) { - List *parent_extstats; - ListCell *l; + List *parent_extstats; + ListCell *l; parent_extstats = RelationGetStatExtList(relation); foreach(l, parent_extstats) { - Oid parent_stat_oid = lfirst_oid(l); + Oid parent_stat_oid = lfirst_oid(l); CreateStatsStmt *stats_stmt; stats_stmt = generateClonedExtStatsStmt(cxt->relation, @@ -1643,16 +1643,16 @@ static CreateStatsStmt * generateClonedExtStatsStmt(RangeVar *heapRel, Oid heapRelid, Oid source_statsid) { - HeapTuple ht_stats; + HeapTuple ht_stats; Form_pg_statistic_ext statsrec; CreateStatsStmt *stats; - List *stat_types = NIL; - List *def_names = NIL; - bool isnull; - Datum datum; - ArrayType *arr; - char *enabled; - int i; + List *stat_types = NIL; + List *def_names = NIL; + bool isnull; + Datum datum; + ArrayType *arr; + char *enabled; + int i; Assert(OidIsValid(heapRelid)); Assert(heapRel != NULL); diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c index 62159477c1..f954b92a6b 100644 --- a/src/backend/partitioning/partprune.c +++ b/src/backend/partitioning/partprune.c @@ -1486,7 +1486,7 @@ match_clause_to_partition_key(RelOptInfo *rel, */ if (op_in_opfamily(opclause->opno, partopfamily)) { - Oid oper; + Oid oper; oper = OidIsValid(commutator) ? commutator : opclause->opno; get_op_opfamily_properties(oper, partopfamily, false, @@ -1528,11 +1528,11 @@ match_clause_to_partition_key(RelOptInfo *rel, { switch (part_scheme->strategy) { - /* - * For range and list partitioning, we need the ordering - * procedure with lefttype being the partition key's type, and - * righttype the clause's operator's right type. - */ + /* + * For range and list partitioning, we need the ordering + * procedure with lefttype being the partition key's type, + * and righttype the clause's operator's right type. + */ case PARTITION_STRATEGY_LIST: case PARTITION_STRATEGY_RANGE: cmpfn = @@ -1541,10 +1541,10 @@ match_clause_to_partition_key(RelOptInfo *rel, op_righttype, BTORDER_PROC); break; - /* - * For hash partitioning, we need the hashing procedure for - * the clause's type. - */ + /* + * For hash partitioning, we need the hashing procedure + * for the clause's type. + */ case PARTITION_STRATEGY_HASH: cmpfn = get_opfamily_proc(part_scheme->partopfamily[partkeyidx], diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c index fa80cebfbd..f8ca52e1af 100644 --- a/src/backend/port/win32_shmem.c +++ b/src/backend/port/win32_shmem.c @@ -112,9 +112,9 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2) static bool EnableLockPagesPrivilege(int elevel) { - HANDLE hToken; + HANDLE hToken; TOKEN_PRIVILEGES tp; - LUID luid; + LUID luid; if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) { @@ -267,8 +267,8 @@ retry: size); /* - * Use the original size, not the rounded-up value, when falling back - * to non-huge pages. + * Use the original size, not the rounded-up value, when + * falling back to non-huge pages. 
*/ size = orig_size; flProtect = PAGE_READWRITE; diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index a79048d233..bc9f585b85 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -243,8 +243,8 @@ perform_base_backup(basebackup_options *opt) /* * Once do_pg_start_backup has been called, ensure that any failure causes * us to abort the backup so we don't "leak" a backup counter. For this - * reason, *all* functionality between do_pg_start_backup() and - * the end of do_pg_stop_backup() should be inside the error cleanup block! + * reason, *all* functionality between do_pg_start_backup() and the end of + * do_pg_stop_backup() should be inside the error cleanup block! */ PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0); @@ -598,7 +598,7 @@ perform_base_backup(basebackup_options *opt) { if (total_checksum_failures > 1) { - char buf[64]; + char buf[64]; snprintf(buf, sizeof(buf), INT64_FORMAT, total_checksum_failures); @@ -1015,15 +1015,15 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces, char pathbuf[MAXPGPATH * 2]; struct stat statbuf; int64 size = 0; - const char *lastDir; /* Split last dir from parent path. */ - bool isDbDir = false; /* Does this directory contain relations? */ + const char *lastDir; /* Split last dir from parent path. */ + bool isDbDir = false; /* Does this directory contain relations? */ /* - * Determine if the current path is a database directory that can - * contain relations. + * Determine if the current path is a database directory that can contain + * relations. * - * Start by finding the location of the delimiter between the parent - * path and the current path. + * Start by finding the location of the delimiter between the parent path + * and the current path. */ lastDir = last_dir_separator(path); @@ -1032,7 +1032,7 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces, strspn(lastDir + 1, "0123456789") == strlen(lastDir + 1)) { /* Part of path that contains the parent directory. */ - int parentPathLen = lastDir - path; + int parentPathLen = lastDir - path; /* * Mark path as a database directory if the parent path is either @@ -1051,7 +1051,7 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces, { int excludeIdx; bool excludeFound; - ForkNumber relForkNum; /* Type of fork if file is a relation */ + ForkNumber relForkNum; /* Type of fork if file is a relation */ int relOidChars; /* Chars in filename that are the rel oid */ /* Skip special stuff */ @@ -1104,8 +1104,8 @@ sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces, /* Never exclude init forks */ if (relForkNum != INIT_FORKNUM) { - char initForkFile[MAXPGPATH]; - char relOid[OIDCHARS + 1]; + char initForkFile[MAXPGPATH]; + char relOid[OIDCHARS + 1]; /* * If any other type of fork, check if there is an init fork @@ -1417,10 +1417,10 @@ sendFile(const char *readfilename, const char *tarfilename, struct stat *statbuf while ((cnt = fread(buf, 1, Min(sizeof(buf), statbuf->st_size - len), fp)) > 0) { /* - * The checksums are verified at block level, so we iterate over - * the buffer in chunks of BLCKSZ, after making sure that - * TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple - * of BLCKSZ bytes. + * The checksums are verified at block level, so we iterate over the + * buffer in chunks of BLCKSZ, after making sure that + * TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple of + * BLCKSZ bytes. 
*/ Assert(TAR_SEND_SIZE % BLCKSZ == 0); @@ -1445,9 +1445,8 @@ sendFile(const char *readfilename, const char *tarfilename, struct stat *statbuf * start of the base backup. Otherwise, they might have been * written only halfway and the checksum would not be valid. * However, replaying WAL would reinstate the correct page in - * this case. - * We also skip completely new pages, since they don't have - * a checksum yet. + * this case. We also skip completely new pages, since they + * don't have a checksum yet. */ if (!PageIsNew(page) && PageGetLSN(page) < startptr) { diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index e4d261bd79..bd48906160 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -54,7 +54,7 @@ static WalReceiverConn *libpqrcv_connect(const char *conninfo, static void libpqrcv_check_conninfo(const char *conninfo); static char *libpqrcv_get_conninfo(WalReceiverConn *conn); static void libpqrcv_get_senderinfo(WalReceiverConn *conn, - char **sender_host, int *sender_port); + char **sender_host, int *sender_port); static char *libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli, int *server_version); @@ -291,9 +291,9 @@ libpqrcv_get_conninfo(WalReceiverConn *conn) */ static void libpqrcv_get_senderinfo(WalReceiverConn *conn, char **sender_host, - int *sender_port) + int *sender_port) { - char *ret = NULL; + char *ret = NULL; *sender_host = NULL; *sender_port = 0; diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 0737c7b1e7..1393591538 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -63,7 +63,7 @@ static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change); static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - int nrelations, Relation relations[], ReorderBufferChange *change); + int nrelations, Relation relations[], ReorderBufferChange *change); static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, XLogRecPtr message_lsn, bool transactional, const char *prefix, Size message_size, const char *message); diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index edc97a7662..19451714da 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -305,7 +305,7 @@ logicalrep_write_truncate(StringInfo out, bool cascade, bool restart_seqs) { int i; - uint8 flags = 0; + uint8 flags = 0; pq_sendbyte(out, 'T'); /* action TRUNCATE */ @@ -332,7 +332,7 @@ logicalrep_read_truncate(StringInfo in, int i; int nrelids; List *relids = NIL; - uint8 flags; + uint8 flags; nrelids = pq_getmsgint(in, 4); diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 596c91e9a9..e2f59bf580 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -1493,37 +1493,37 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, break; case REORDER_BUFFER_CHANGE_TRUNCATE: - { - int i; - int nrelids = change->data.truncate.nrelids; - int nrelations = 0; - Relation *relations; - - relations = palloc0(nrelids * sizeof(Relation)); - for (i 
= 0; i < nrelids; i++) { - Oid relid = change->data.truncate.relids[i]; - Relation relation; + int i; + int nrelids = change->data.truncate.nrelids; + int nrelations = 0; + Relation *relations; - relation = RelationIdGetRelation(relid); + relations = palloc0(nrelids * sizeof(Relation)); + for (i = 0; i < nrelids; i++) + { + Oid relid = change->data.truncate.relids[i]; + Relation relation; - if (relation == NULL) - elog(ERROR, "could not open relation with OID %u", relid); + relation = RelationIdGetRelation(relid); - if (!RelationIsLogicallyLogged(relation)) - continue; + if (relation == NULL) + elog(ERROR, "could not open relation with OID %u", relid); - relations[nrelations++] = relation; + if (!RelationIsLogicallyLogged(relation)) + continue; + + relations[nrelations++] = relation; + } + + rb->apply_truncate(rb, txn, nrelations, relations, change); + + for (i = 0; i < nrelations; i++) + RelationClose(relations[i]); + + break; } - rb->apply_truncate(rb, txn, nrelations, relations, change); - - for (i = 0; i < nrelations; i++) - RelationClose(relations[i]); - - break; - } - case REORDER_BUFFER_CHANGE_MESSAGE: rb->message(rb, txn, change->lsn, true, change->data.msg.prefix, @@ -1744,7 +1744,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid) if (txn->serialized && txn->final_lsn == 0) { ReorderBufferChange *last = - dlist_tail_element(ReorderBufferChange, node, &txn->changes); + dlist_tail_element(ReorderBufferChange, node, &txn->changes); txn->final_lsn = last->lsn; } @@ -2660,9 +2660,9 @@ ReorderBufferSerializedPath(char *path, ReplicationSlot *slot, TransactionId xid XLogSegNoOffsetToRecPtr(segno, 0, recptr, wal_segment_size); snprintf(path, MAXPGPATH, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap", - NameStr(MyReplicationSlot->data.name), - xid, - (uint32) (recptr >> 32), (uint32) recptr); + NameStr(MyReplicationSlot->data.name), + xid, + (uint32) (recptr >> 32), (uint32) recptr); } /* diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 2bcf56ca2f..2ed8144497 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -899,14 +899,14 @@ apply_handle_delete(StringInfo s) static void apply_handle_truncate(StringInfo s) { - bool cascade = false; - bool restart_seqs = false; - List *remote_relids = NIL; - List *remote_rels = NIL; - List *rels = NIL; - List *relids = NIL; - List *relids_logged = NIL; - ListCell *lc; + bool cascade = false; + bool restart_seqs = false; + List *remote_relids = NIL; + List *remote_rels = NIL; + List *rels = NIL; + List *relids = NIL; + List *relids_logged = NIL; + ListCell *lc; ensure_transaction(); @@ -936,9 +936,9 @@ apply_handle_truncate(StringInfo s) } /* - * Even if we used CASCADE on the upstream master we explicitly - * default to replaying changes without further cascading. - * This might be later changeable with a user specified option. + * Even if we used CASCADE on the upstream master we explicitly default to + * replaying changes without further cascading. This might be later + * changeable with a user specified option. 
*/ ExecuteTruncateGuts(rels, relids, relids_logged, DROP_RESTRICT, restart_seqs); diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 06dfbc082f..a3e5300679 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -40,8 +40,8 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); static void pgoutput_truncate(LogicalDecodingContext *ctx, - ReorderBufferTXN *txn, int nrelations, Relation relations[], - ReorderBufferChange *change); + ReorderBufferTXN *txn, int nrelations, Relation relations[], + ReorderBufferChange *change); static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index e873dd1f81..d9e10263bb 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -342,8 +342,8 @@ static XLogRecPtr pg_logical_replication_slot_advance(XLogRecPtr startlsn, XLogRecPtr moveto) { LogicalDecodingContext *ctx; - ResourceOwner old_resowner = CurrentResourceOwner; - XLogRecPtr retlsn = InvalidXLogRecPtr; + ResourceOwner old_resowner = CurrentResourceOwner; + XLogRecPtr retlsn = InvalidXLogRecPtr; PG_TRY(); { diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index b9dab322d6..987bb84683 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -1461,8 +1461,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) { /* * Only superusers and members of pg_read_all_stats can see details. - * Other users only get the pid value - * to know whether it is a WAL receiver, but no details. + * Other users only get the pid value to know whether it is a WAL + * receiver, but no details. */ MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1)); } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 642e859439..e47ddca6bc 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -1153,7 +1153,7 @@ static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write) { - TimestampTz now; + TimestampTz now; /* output previously gathered data in a CopyData packet */ pq_putmessage_noblock('d', ctx->out->data, ctx->out->len); @@ -3247,9 +3247,9 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS) if (!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS)) { /* - * Only superusers and members of pg_read_all_stats can see details. - * Other users only get the pid value to know it's a walsender, - * but no details. + * Only superusers and members of pg_read_all_stats can see + * details. Other users only get the pid value to know it's a + * walsender, but no details. */ MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1); } diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index c058c3fc43..9cdddba510 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -213,9 +213,9 @@ MakeNewSharedSegment(BufFile *buffile, int segment) /* * It is possible that there are files left over from before a crash - * restart with the same name. In order for BufFileOpenShared() - * not to get confused about how many segments there are, we'll unlink - * the next segment number if it already exists. + * restart with the same name. 
In order for BufFileOpenShared() not to + * get confused about how many segments there are, we'll unlink the next + * segment number if it already exists. */ SharedSegmentName(name, buffile->name, segment + 1); SharedFileSetDelete(buffile->fileset, name, true); diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index c80cb6e2f7..9c227c0a64 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -1203,9 +1203,10 @@ shm_mq_inc_bytes_read(shm_mq *mq, Size n) /* * Separate prior reads of mq_ring from the increment of mq_bytes_read - * which follows. This pairs with the full barrier in shm_mq_send_bytes(). - * We only need a read barrier here because the increment of mq_bytes_read - * is actually a read followed by a dependent write. + * which follows. This pairs with the full barrier in + * shm_mq_send_bytes(). We only need a read barrier here because the + * increment of mq_bytes_read is actually a read followed by a dependent + * write. */ pg_read_barrier(); diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 6e7f4545ad..287addf429 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -792,9 +792,9 @@ standard_ProcessUtility(PlannedStmt *pstmt, * intended effect! */ PreventInTransactionBlock(isTopLevel, - (stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" : - (stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" : - "REINDEX DATABASE"); + (stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" : + (stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" : + "REINDEX DATABASE"); ReindexMultipleTables(stmt->name, stmt->kind, stmt->options); break; default: @@ -1291,7 +1291,7 @@ ProcessUtilitySlow(ParseState *pstate, if (stmt->concurrent) PreventInTransactionBlock(isTopLevel, - "CREATE INDEX CONCURRENTLY"); + "CREATE INDEX CONCURRENTLY"); /* * Look up the relation OID just once, right here at the @@ -1700,7 +1700,7 @@ ExecDropStmt(DropStmt *stmt, bool isTopLevel) case OBJECT_INDEX: if (stmt->concurrent) PreventInTransactionBlock(isTopLevel, - "DROP INDEX CONCURRENTLY"); + "DROP INDEX CONCURRENTLY"); /* fall through */ case OBJECT_TABLE: diff --git a/src/backend/tsearch/to_tsany.c b/src/backend/tsearch/to_tsany.c index 2474b723b4..4b44b85642 100644 --- a/src/backend/tsearch/to_tsany.c +++ b/src/backend/tsearch/to_tsany.c @@ -660,7 +660,7 @@ Datum websearch_to_tsquery_byid(PG_FUNCTION_ARGS) { text *in = PG_GETARG_TEXT_PP(1); - MorphOpaque data; + MorphOpaque data; TSQuery query = NULL; data.cfg_id = PG_GETARG_OID(0); diff --git a/src/backend/utils/adt/amutils.c b/src/backend/utils/adt/amutils.c index 0f8ad4ef0f..dc04148b78 100644 --- a/src/backend/utils/adt/amutils.c +++ b/src/backend/utils/adt/amutils.c @@ -187,8 +187,8 @@ indexam_property(FunctionCallInfo fcinfo, } /* - * At this point, either index_oid == InvalidOid or it's a valid index OID. - * Also, after this test and the one below, either attno == 0 for + * At this point, either index_oid == InvalidOid or it's a valid index + * OID. Also, after this test and the one below, either attno == 0 for * index-wide or AM-wide tests, or it's a valid column number in a valid * index. */ @@ -276,6 +276,7 @@ indexam_property(FunctionCallInfo fcinfo, break; case AMPROP_ORDERABLE: + /* * generic assumption is that nonkey columns are not orderable */ @@ -293,8 +294,9 @@ indexam_property(FunctionCallInfo fcinfo, * getting there from just the index column type seems like a * lot of work. 
So instead we expect the AM to handle this in * its amproperty routine. The generic result is to return - * false if the AM says it never supports this, or if this is a - * nonkey column, and null otherwise (meaning we don't know). + * false if the AM says it never supports this, or if this is + * a nonkey column, and null otherwise (meaning we don't + * know). */ if (!iskey || !routine->amcanorderbyop) { @@ -314,8 +316,8 @@ indexam_property(FunctionCallInfo fcinfo, { /* * If possible, the AM should handle this test in its - * amproperty function without opening the rel. But this is the - * generic fallback if it does not. + * amproperty function without opening the rel. But this + * is the generic fallback if it does not. */ Relation indexrel = index_open(index_oid, AccessShareLock); diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 1a1088711c..a345c65605 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -3905,7 +3905,7 @@ do_to_timestamp(text *date_txt, text *fmt, DateTimeParseError(DTERR_TZDISP_OVERFLOW, date_str, "timestamp"); tz = psprintf("%c%02d:%02d", - tmfc.tzsign > 0 ? '+' : '-', tmfc.tzh, tmfc.tzm); + tmfc.tzsign > 0 ? '+' : '-', tmfc.tzh, tmfc.tzm); tm->tm_zone = tz; } diff --git a/src/backend/utils/adt/geo_spgist.c b/src/backend/utils/adt/geo_spgist.c index 3f1a755cbb..06411aea9e 100644 --- a/src/backend/utils/adt/geo_spgist.c +++ b/src/backend/utils/adt/geo_spgist.c @@ -686,10 +686,10 @@ spg_box_quad_leaf_consistent(PG_FUNCTION_ARGS) /* Perform the required comparison(s) */ for (i = 0; i < in->nkeys; i++) { - StrategyNumber strategy = in->scankeys[i].sk_strategy; - BOX *box = spg_box_quad_get_scankey_bbox(&in->scankeys[i], - &out->recheck); - Datum query = BoxPGetDatum(box); + StrategyNumber strategy = in->scankeys[i].sk_strategy; + BOX *box = spg_box_quad_get_scankey_bbox(&in->scankeys[i], + &out->recheck); + Datum query = BoxPGetDatum(box); switch (strategy) { @@ -790,7 +790,7 @@ spg_bbox_quad_config(PG_FUNCTION_ARGS) Datum spg_poly_quad_compress(PG_FUNCTION_ARGS) { - POLYGON *polygon = PG_GETARG_POLYGON_P(0); + POLYGON *polygon = PG_GETARG_POLYGON_P(0); BOX *box; box = box_copy(&polygon->boundbox); diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index 80d23cc052..e99bbc482a 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -1861,8 +1861,8 @@ JsonbExtractScalar(JsonbContainer *jbc, JsonbValue *res) return NULL; /* - * A root scalar is stored as an array of one element, so we get the - * array and then its first (and only) member. + * A root scalar is stored as an array of one element, so we get the array + * and then its first (and only) member. 
*/ it = JsonbIteratorInit(jbc); @@ -1871,11 +1871,11 @@ JsonbExtractScalar(JsonbContainer *jbc, JsonbValue *res) Assert(tmp.val.array.nElems == 1 && tmp.val.array.rawScalar); tok = JsonbIteratorNext(&it, res, true); - Assert (tok == WJB_ELEM); + Assert(tok == WJB_ELEM); Assert(IsAJsonbScalar(res)); tok = JsonbIteratorNext(&it, &tmp, true); - Assert (tok == WJB_END_ARRAY); + Assert(tok == WJB_END_ARRAY); tok = JsonbIteratorNext(&it, &tmp, true); Assert(tok == WJB_DONE); @@ -1912,7 +1912,8 @@ jsonb_numeric(PG_FUNCTION_ARGS) errmsg("jsonb value must be numeric"))); /* - * v.val.numeric points into jsonb body, so we need to make a copy to return + * v.val.numeric points into jsonb body, so we need to make a copy to + * return */ retValue = DatumGetNumericCopy(NumericGetDatum(v.val.numeric)); @@ -1925,7 +1926,7 @@ Datum jsonb_int2(PG_FUNCTION_ARGS) { Jsonb *in = PG_GETARG_JSONB_P(0); - JsonbValue v; + JsonbValue v; Datum retValue; if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric) @@ -1945,7 +1946,7 @@ Datum jsonb_int4(PG_FUNCTION_ARGS) { Jsonb *in = PG_GETARG_JSONB_P(0); - JsonbValue v; + JsonbValue v; Datum retValue; if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric) @@ -1965,7 +1966,7 @@ Datum jsonb_int8(PG_FUNCTION_ARGS) { Jsonb *in = PG_GETARG_JSONB_P(0); - JsonbValue v; + JsonbValue v; Datum retValue; if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric) diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 202779dfff..2f701603a2 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -60,7 +60,8 @@ typedef struct IterateJsonStringValuesState JsonIterateStringValuesAction action; /* an action that will be applied * to each json value */ void *action_state; /* any necessary context for iteration */ - uint32 flags; /* what kind of elements from a json we want to iterate */ + uint32 flags; /* what kind of elements from a json we want + * to iterate */ } IterateJsonStringValuesState; /* state for transform_json_string_values function */ @@ -4950,19 +4951,19 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, uint32 parse_jsonb_index_flags(Jsonb *jb) { - JsonbIterator *it; - JsonbValue v; - JsonbIteratorToken type; - uint32 flags = 0; + JsonbIterator *it; + JsonbValue v; + JsonbIteratorToken type; + uint32 flags = 0; it = JsonbIteratorInit(&jb->root); type = JsonbIteratorNext(&it, &v, false); /* - * We iterate over array (scalar internally is represented as array, so, we - * will accept it too) to check all its elements. Flag names are chosen - * the same as jsonb_typeof uses. + * We iterate over array (scalar internally is represented as array, so, + * we will accept it too) to check all its elements. Flag names are + * chosen the same as jsonb_typeof uses. 
*/ if (type != WJB_BEGIN_ARRAY) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -4977,7 +4978,7 @@ parse_jsonb_index_flags(Jsonb *jb) errhint("Possible values are: \"string\", \"numeric\", \"boolean\", \"key\" and \"all\""))); if (v.val.string.len == 3 && - pg_strncasecmp(v.val.string.val, "all", 3) == 0) + pg_strncasecmp(v.val.string.val, "all", 3) == 0) flags |= jtiAll; else if (v.val.string.len == 3 && pg_strncasecmp(v.val.string.val, "key", 3) == 0) @@ -5045,7 +5046,7 @@ iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state, } /* JsonbValue is a value of object or element of array */ - switch(v.type) + switch (v.type) { case jbvString: if (flags & jtiString) @@ -5054,10 +5055,10 @@ iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state, case jbvNumeric: if (flags & jtiNumeric) { - char *val; + char *val; val = DatumGetCString(DirectFunctionCall1(numeric_out, - NumericGetDatum(v.val.numeric))); + NumericGetDatum(v.val.numeric))); action(state, val, strlen(val)); pfree(val); @@ -5112,7 +5113,7 @@ iterate_values_scalar(void *state, char *token, JsonTokenType tokentype) { IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state; - switch(tokentype) + switch (tokentype) { case JSON_TOKEN_STRING: if (_state->flags & jtiString) @@ -5140,7 +5141,8 @@ iterate_values_object_field_start(void *state, char *fname, bool isnull) if (_state->flags & jtiKey) { - char *val = pstrdup(fname); + char *val = pstrdup(fname); + _state->action(_state->action_state, val, strlen(val)); } } diff --git a/src/backend/utils/adt/tsquery.c b/src/backend/utils/adt/tsquery.c index 793c0e5dd1..7b9dbfef0c 100644 --- a/src/backend/utils/adt/tsquery.c +++ b/src/backend/utils/adt/tsquery.c @@ -63,9 +63,9 @@ typedef enum * *strval, *lenval and *weight are filled in when return value is PT_VAL * */ -typedef ts_tokentype (*ts_tokenizer)(TSQueryParserState state, int8 *operator, - int *lenval, char **strval, - int16 *weight, bool *prefix); +typedef ts_tokentype (*ts_tokenizer) (TSQueryParserState state, int8 *operator, + int *lenval, char **strval, + int16 *weight, bool *prefix); struct TSQueryParserStateData { @@ -233,7 +233,7 @@ parse_phrase_operator(TSQueryParserState pstate, int16 *distance) static bool parse_or_operator(TSQueryParserState pstate) { - char *ptr = pstate->buf; + char *ptr = pstate->buf; if (pstate->in_quotes) return false; @@ -245,26 +245,26 @@ parse_or_operator(TSQueryParserState pstate) ptr += 2; /* - * it shouldn't be a part of any word but somewhere later it should be some - * operand + * it shouldn't be a part of any word but somewhere later it should be + * some operand */ - if (*ptr == '\0') /* no operand */ + if (*ptr == '\0') /* no operand */ return false; /* it shouldn't be a part of any word */ - if (t_iseq(ptr, '-') || t_iseq(ptr, '_') || t_isalpha(ptr) || t_isdigit(ptr)) + if (t_iseq(ptr, '-') || t_iseq(ptr, '_') || t_isalpha(ptr) || t_isdigit(ptr)) return false; - for(;;) + for (;;) { ptr += pg_mblen(ptr); - if (*ptr == '\0') /* got end of string without operand */ + if (*ptr == '\0') /* got end of string without operand */ return false; /* - * Suppose, we found an operand, but could be a not correct operand. So - * we still treat OR literal as operation with possibly incorrect + * Suppose, we found an operand, but could be a not correct operand. 
+ * So we still treat OR literal as operation with possibly incorrect * operand and will not search it as lexeme */ if (!t_isspace(ptr)) @@ -312,7 +312,10 @@ gettoken_query_standard(TSQueryParserState state, int8 *operator, } else if (!t_isspace(state->buf)) { - /* We rely on the tsvector parser to parse the value for us */ + /* + * We rely on the tsvector parser to parse the value for + * us + */ reset_tsvector_parser(state->valstate, state->buf); if (gettoken_tsvector(state->valstate, strval, lenval, NULL, NULL, &state->buf)) @@ -437,7 +440,10 @@ gettoken_query_websearch(TSQueryParserState state, int8 *operator, } else if (!t_isspace(state->buf)) { - /* We rely on the tsvector parser to parse the value for us */ + /* + * We rely on the tsvector parser to parse the value for + * us + */ reset_tsvector_parser(state->valstate, state->buf); if (gettoken_tsvector(state->valstate, strval, lenval, NULL, NULL, &state->buf)) @@ -464,8 +470,8 @@ gettoken_query_websearch(TSQueryParserState state, int8 *operator, if (!state->in_quotes) { /* - * put implicit AND after an operand - * and handle this quote in WAITOPERAND + * put implicit AND after an operand and handle this + * quote in WAITOPERAND */ state->state = WAITOPERAND; *operator = OP_AND; diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 22ff36714c..3dfb1b8fbe 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -743,7 +743,7 @@ RelationBuildRuleLock(Relation relation) ALLOCSET_SMALL_SIZES); relation->rd_rulescxt = rulescxt; MemoryContextCopyAndSetIdentifier(rulescxt, - RelationGetRelationName(relation)); + RelationGetRelationName(relation)); /* * allocate an array to hold the rewrite rules (the array is extended if @@ -1400,7 +1400,7 @@ RelationInitIndexAccessInfo(Relation relation) ALLOCSET_SMALL_SIZES); relation->rd_indexcxt = indexcxt; MemoryContextCopyAndSetIdentifier(indexcxt, - RelationGetRelationName(relation)); + RelationGetRelationName(relation)); /* * Now we can fetch the index AM's API struct @@ -4678,16 +4678,17 @@ RelationGetIndexPredicate(Relation relation) expensive, so we don't attempt it by default. * 2. "recheck_on_update" index option explicitly set by user, which overrides 1) */ -static bool IsProjectionFunctionalIndex(Relation index, IndexInfo* ii) +static bool +IsProjectionFunctionalIndex(Relation index, IndexInfo *ii) { - bool is_projection = false; + bool is_projection = false; if (ii->ii_Expressions) { - HeapTuple tuple; - Datum reloptions; - bool isnull; - QualCost index_expr_cost; + HeapTuple tuple; + Datum reloptions; + bool isnull; + QualCost index_expr_cost; /* by default functional index is considered as non-injective */ is_projection = true; @@ -4704,7 +4705,7 @@ static bool IsProjectionFunctionalIndex(Relation index, IndexInfo* ii) * inserting a new index entry for the changed value. 
*/ if ((index_expr_cost.startup + index_expr_cost.per_tuple) > - HEURISTIC_MAX_HOT_RECHECK_EXPR_COST) + HEURISTIC_MAX_HOT_RECHECK_EXPR_COST) is_projection = false; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(RelationGetRelid(index))); @@ -4758,7 +4759,7 @@ Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind) { Bitmapset *indexattrs; /* columns used in non-projection indexes */ - Bitmapset *projindexattrs; /* columns used in projection indexes */ + Bitmapset *projindexattrs; /* columns used in projection indexes */ Bitmapset *uindexattrs; /* columns in unique indexes */ Bitmapset *pkindexattrs; /* columns in the primary index */ Bitmapset *idindexattrs; /* columns in the replica identity */ @@ -4769,7 +4770,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind) Oid relreplindex; ListCell *l; MemoryContext oldcxt; - int indexno; + int indexno; /* Quick exit if we already computed the result. */ if (relation->rd_indexattr != NULL) @@ -5479,7 +5480,7 @@ load_relcache_init_file(bool shared) ALLOCSET_SMALL_SIZES); rel->rd_indexcxt = indexcxt; MemoryContextCopyAndSetIdentifier(indexcxt, - RelationGetRelationName(rel)); + RelationGetRelationName(rel)); /* * Now we can fetch the index AM's API struct. (We can't store diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index aa188bdeff..6cbbd5b78b 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -59,6 +59,7 @@ static void fmgr_info_other_lang(Oid functionId, FmgrInfo *finfo, HeapTuple proc static CFuncHashTabEntry *lookup_C_func(HeapTuple procedureTuple); static void record_C_func(HeapTuple procedureTuple, PGFunction user_fn, const Pg_finfo_record *inforec); + /* extern so it's callable via JIT */ extern Datum fmgr_security_definer(PG_FUNCTION_ARGS); @@ -297,7 +298,7 @@ fmgr_symbol(Oid functionId, char **mod, char **fn) !heap_attisnull(procedureTuple, Anum_pg_proc_proconfig, NULL) || FmgrHookIsNeeded(functionId)) { - *mod = NULL; /* core binary */ + *mod = NULL; /* core binary */ *fn = pstrdup("fmgr_security_definer"); ReleaseSysCache(procedureTuple); return; @@ -312,7 +313,7 @@ fmgr_symbol(Oid functionId, char **mod, char **fn) if (isnull) elog(ERROR, "null prosrc"); - *mod = NULL; /* core binary */ + *mod = NULL; /* core binary */ *fn = TextDatumGetCString(prosrcattr); break; @@ -336,13 +337,13 @@ fmgr_symbol(Oid functionId, char **mod, char **fn) break; case SQLlanguageId: - *mod = NULL; /* core binary */ + *mod = NULL; /* core binary */ *fn = pstrdup("fmgr_sql"); break; default: *mod = NULL; - *fn = NULL; /* unknown, pass pointer */ + *fn = NULL; /* unknown, pass pointer */ break; } diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 44dfa92722..6eae3d62cc 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -1754,6 +1754,7 @@ static struct config_bool ConfigureNamesBool[] = }, &jit_debugging_support, false, + /* * This is not guaranteed to be available, but given it's a developer * oriented option, it doesn't seem worth adding code checking @@ -1792,6 +1793,7 @@ static struct config_bool ConfigureNamesBool[] = }, &jit_profiling_support, false, + /* * This is not guaranteed to be available, but given it's a developer * oriented option, it doesn't seem worth adding code checking diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 53225d6f1b..04ea32f49f 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ 
b/src/backend/utils/mmgr/portalmem.c @@ -108,8 +108,8 @@ EnablePortalManager(void) Assert(TopPortalContext == NULL); TopPortalContext = AllocSetContextCreate(TopMemoryContext, - "TopPortalContext", - ALLOCSET_DEFAULT_SIZES); + "TopPortalContext", + ALLOCSET_DEFAULT_SIZES); ctl.keysize = MAX_PORTALNAME_LEN; ctl.entrysize = sizeof(PortalHashEnt); @@ -630,8 +630,8 @@ static void HoldPortal(Portal portal) { /* - * Note that PersistHoldablePortal() must release all resources - * used by the portal that are local to the creating transaction. + * Note that PersistHoldablePortal() must release all resources used by + * the portal that are local to the creating transaction. */ PortalCreateHoldStore(portal); PersistHoldablePortal(portal); @@ -640,15 +640,15 @@ HoldPortal(Portal portal) PortalReleaseCachedPlan(portal); /* - * Any resources belonging to the portal will be released in the - * upcoming transaction-wide cleanup; the portal will no longer - * have its own resources. + * Any resources belonging to the portal will be released in the upcoming + * transaction-wide cleanup; the portal will no longer have its own + * resources. */ portal->resowner = NULL; /* - * Having successfully exported the holdable cursor, mark it as - * not belonging to this transaction. + * Having successfully exported the holdable cursor, mark it as not + * belonging to this transaction. */ portal->createSubid = InvalidSubTransactionId; portal->activeSubid = InvalidSubTransactionId; @@ -1240,8 +1240,8 @@ HoldPinnedPortals(void) { /* * Doing transaction control, especially abort, inside a cursor - * loop that is not read-only, for example using UPDATE - * ... RETURNING, has weird semantics issues. Also, this + * loop that is not read-only, for example using UPDATE ... + * RETURNING, has weird semantics issues. Also, this * implementation wouldn't work, because such portals cannot be * held. (The core grammar enforces that only SELECT statements * can drive a cursor, but for example PL/pgSQL does not restrict diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c index 3e47fbde8e..265c04b3d3 100644 --- a/src/backend/utils/sort/sharedtuplestore.c +++ b/src/backend/utils/sort/sharedtuplestore.c @@ -47,7 +47,7 @@ typedef struct SharedTuplestoreChunk int ntuples; /* Number of tuples in this chunk. */ int overflow; /* If overflow, how many including this one? */ char data[FLEXIBLE_ARRAY_MEMBER]; -} SharedTuplestoreChunk; +} SharedTuplestoreChunk; /* Per-participant shared state. */ typedef struct SharedTuplestoreParticipant @@ -56,7 +56,7 @@ typedef struct SharedTuplestoreParticipant BlockNumber read_page; /* Page number for next read. */ BlockNumber npages; /* Number of pages written. */ bool writing; /* Used only for assertions. */ -} SharedTuplestoreParticipant; +} SharedTuplestoreParticipant; /* The control object that lives in shared memory. */ struct SharedTuplestore diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index 77ae91fbe7..3394537d85 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -219,9 +219,9 @@ GetConnection(void) /* * Set always-secure search path, so malicious users can't get control. - * The capacity to run normal SQL queries was added in PostgreSQL - * 10, so the search path cannot be changed (by us or attackers) on - * earlier versions. 
+ * The capacity to run normal SQL queries was added in PostgreSQL 10, so + * the search path cannot be changed (by us or attackers) on earlier + * versions. */ if (dbname != NULL && PQserverVersion(tmpconn) >= 100000) { diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c index 143021de05..ed2396aa6c 100644 --- a/src/bin/pg_ctl/pg_ctl.c +++ b/src/bin/pg_ctl/pg_ctl.c @@ -1846,7 +1846,8 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser static PTOKEN_PRIVILEGES GetPrivilegesToDelete(HANDLE hToken) { - int i, j; + int i, + j; DWORD length; PTOKEN_PRIVILEGES tokenPrivs; LUID luidLockPages; diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index e7db78b0ff..0d147cb08d 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -350,8 +350,8 @@ flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables, findParentsByOid(&tblinfo[i], inhinfo, numInherits); /* - * If needed, mark the parents as interesting for getTableAttrs - * and getIndexes. + * If needed, mark the parents as interesting for getTableAttrs and + * getIndexes. */ if (mark_parents) { @@ -372,9 +372,9 @@ flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables, static void flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables) { - int i, - j, - k; + int i, + j, + k; DumpableObject ***parentIndexArray; parentIndexArray = (DumpableObject ***) @@ -382,7 +382,7 @@ flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables) for (i = 0; i < numTables; i++) { - TableInfo *parenttbl; + TableInfo *parenttbl; IndexAttachInfo *attachinfo; if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0) @@ -430,9 +430,9 @@ flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables) /* * We want dependencies from parent to partition (so that the - * partition index is created first), and another one from - * attach object to parent (so that the partition index is - * attached once the parent index has been created). + * partition index is created first), and another one from attach + * object to parent (so that the partition index is attached once + * the parent index has been created). */ addObjectDependency(&parentidx->dobj, index->dobj.dumpId); addObjectDependency(&attachinfo[k].dobj, parentidx->dobj.dumpId); diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index b11fe94212..c5b49459cc 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -16433,13 +16433,13 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo) } else if (coninfo->contype == 'f') { - char *only; + char *only; /* - * Foreign keys on partitioned tables are always declared as inheriting - * to partitions; for all other cases, emit them as applying ONLY - * directly to the named table, because that's how they work for - * regular inherited tables. + * Foreign keys on partitioned tables are always declared as + * inheriting to partitions; for all other cases, emit them as + * applying ONLY directly to the named table, because that's how they + * work for regular inherited tables. */ only = tbinfo->relkind == RELKIND_PARTITIONED_TABLE ? "" : "ONLY "; diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c index e45e6d44ec..8f49d34652 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -48,7 +48,7 @@ static const char *excludeDirContents[] = * when stats_temp_directory is set because PGSS_TEXT_FILE is always * created there. 
*/ - "pg_stat_tmp", /* defined as PG_STAT_TMP_DIR */ + "pg_stat_tmp", /* defined as PG_STAT_TMP_DIR */ /* * It is generally not useful to backup the contents of this directory @@ -58,7 +58,7 @@ static const char *excludeDirContents[] = "pg_replslot", /* Contents removed on startup, see dsm_cleanup_for_mmap(). */ - "pg_dynshmem", /* defined as PG_DYNSHMEM_DIR */ + "pg_dynshmem", /* defined as PG_DYNSHMEM_DIR */ /* Contents removed on startup, see AsyncShmemInit(). */ "pg_notify", @@ -492,9 +492,9 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) static bool check_file_excluded(const char *path, const char *type) { - char localpath[MAXPGPATH]; - int excludeIdx; - const char *filename; + char localpath[MAXPGPATH]; + int excludeIdx; + const char *filename; /* check individual files... */ for (excludeIdx = 0; excludeFiles[excludeIdx] != NULL; excludeIdx++) @@ -733,8 +733,8 @@ isRelDataFile(const char *path) /* * The sscanf tests above can match files that have extra characters at * the end. To eliminate such cases, cross-check that GetRelationPath - * creates the exact same filename, when passed the RelFileNode information - * we extracted from the filename. + * creates the exact same filename, when passed the RelFileNode + * information we extracted from the filename. */ if (matched) { diff --git a/src/bin/pg_upgrade/exec.c b/src/bin/pg_upgrade/exec.c index 9122e2769e..3d2de83a90 100644 --- a/src/bin/pg_upgrade/exec.c +++ b/src/bin/pg_upgrade/exec.c @@ -110,6 +110,7 @@ exec_prog(const char *log_file, const char *opt_log_file, pg_log(PG_VERBOSE, "%s\n", cmd); #ifdef WIN32 + /* * For some reason, Windows issues a file-in-use error if we write data to * the log file from a non-primary thread just before we create a @@ -191,6 +192,7 @@ exec_prog(const char *log_file, const char *opt_log_file, } #ifndef WIN32 + /* * We can't do this on Windows because it will keep the "pg_ctl start" * output filename open until the server stops, so we do the \n\n above on diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c index 5273ef6681..fccc21836a 100644 --- a/src/bin/pg_upgrade/server.c +++ b/src/bin/pg_upgrade/server.c @@ -309,8 +309,8 @@ start_postmaster(ClusterInfo *cluster, bool report_and_exit_on_error) /* * If pg_ctl failed, and the connection didn't fail, and - * report_and_exit_on_error is enabled, fail now. This - * could happen if the server was already running. + * report_and_exit_on_error is enabled, fail now. This could happen if + * the server was already running. 
*/ if (!pg_ctl_return) { diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index fd1856837a..78b8f1706c 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -156,7 +156,7 @@ char *tablespace = NULL; char *index_tablespace = NULL; /* random seed used when calling srandom() */ -int64 random_seed = -1; +int64 random_seed = -1; /* * end of configurable parameters @@ -820,7 +820,7 @@ generalizedHarmonicNumber(int64 n, double s) /* set harmonicn and other parameters to cache cell */ static void -zipfSetCacheCell(ZipfCell * cell, int64 n, double s) +zipfSetCacheCell(ZipfCell *cell, int64 n, double s) { double harmonic2; @@ -840,7 +840,7 @@ zipfSetCacheCell(ZipfCell * cell, int64 n, double s) * and create new cell if it does not exist */ static ZipfCell * -zipfFindOrCreateCacheCell(ZipfCache * cache, int64 n, double s) +zipfFindOrCreateCacheCell(ZipfCache *cache, int64 n, double s) { int i, least_recently_used = 0; @@ -943,13 +943,13 @@ getZipfianRand(TState *thread, int64 min, int64 max, double s) static int64 getHashFnv1a(int64 val, uint64 seed) { - int64 result; - int i; + int64 result; + int i; result = FNV_OFFSET_BASIS ^ seed; for (i = 0; i < 8; ++i) { - int32 octet = val & 0xff; + int32 octet = val & 0xff; val = val >> 8; result = result ^ octet; @@ -968,8 +968,8 @@ getHashFnv1a(int64 val, uint64 seed) static int64 getHashMurmur2(int64 val, uint64 seed) { - uint64 result = seed ^ (sizeof(int64) * MM2_MUL); - uint64 k = (uint64) val; + uint64 result = seed ^ (sizeof(int64) * MM2_MUL); + uint64 k = (uint64) val; k *= MM2_MUL; k ^= k >> MM2_ROT; @@ -1236,7 +1236,7 @@ getVariable(CState *st, char *name) else if (var->value.type == PGBT_DOUBLE) snprintf(stringform, sizeof(stringform), "%.*g", DBL_DIG, var->value.u.dval); - else /* internal error, unexpected type */ + else /* internal error, unexpected type */ Assert(0); var->svalue = pg_strdup(stringform); return var->svalue; @@ -1246,7 +1246,7 @@ getVariable(CState *st, char *name) static bool makeVariableValue(Variable *var) { - size_t slen; + size_t slen; if (var->value.type != PGBT_NO_VALUE) return true; /* no work */ @@ -1261,10 +1261,10 @@ makeVariableValue(Variable *var) { setNullValue(&var->value); } + /* - * accept prefixes such as y, ye, n, no... but not for "o". - * 0/1 are recognized later as an int, which is converted - * to bool if needed. + * accept prefixes such as y, ye, n, no... but not for "o". 0/1 are + * recognized later as an int, which is converted to bool if needed. 
*/ else if (pg_strncasecmp(var->svalue, "true", slen) == 0 || pg_strncasecmp(var->svalue, "yes", slen) == 0 || @@ -1410,7 +1410,7 @@ putVariable(CState *st, const char *context, char *name, const char *value) /* Returns false on failure (bad name) */ static bool putVariableValue(CState *st, const char *context, char *name, - const PgBenchValue *value) + const PgBenchValue *value) { Variable *var; @@ -1563,7 +1563,7 @@ coerceToBool(PgBenchValue *pval, bool *bval) *bval = pval->u.bval; return true; } - else /* NULL, INT or DOUBLE */ + else /* NULL, INT or DOUBLE */ { fprintf(stderr, "cannot coerce %s to boolean\n", valueTypeName(pval)); *bval = false; /* suppress uninitialized-variable warnings */ @@ -1616,7 +1616,7 @@ coerceToInt(PgBenchValue *pval, int64 *ival) *ival = (int64) dval; return true; } - else /* BOOLEAN or NULL */ + else /* BOOLEAN or NULL */ { fprintf(stderr, "cannot coerce %s to int\n", valueTypeName(pval)); return false; @@ -1637,7 +1637,7 @@ coerceToDouble(PgBenchValue *pval, double *dval) *dval = (double) pval->u.ival; return true; } - else /* BOOLEAN or NULL */ + else /* BOOLEAN or NULL */ { fprintf(stderr, "cannot coerce %s to double\n", valueTypeName(pval)); return false; @@ -1676,7 +1676,8 @@ setDoubleValue(PgBenchValue *pv, double dval) pv->u.dval = dval; } -static bool isLazyFunc(PgBenchFunction func) +static bool +isLazyFunc(PgBenchFunction func) { return func == PGBENCH_AND || func == PGBENCH_OR || func == PGBENCH_CASE; } @@ -1686,8 +1687,10 @@ static bool evalLazyFunc(TState *thread, CState *st, PgBenchFunction func, PgBenchExprLink *args, PgBenchValue *retval) { - PgBenchValue a1, a2; - bool ba1, ba2; + PgBenchValue a1, + a2; + bool ba1, + ba2; Assert(isLazyFunc(func) && args != NULL && args->next != NULL); @@ -1700,92 +1703,92 @@ evalLazyFunc(TState *thread, CState *st, switch (func) { - case PGBENCH_AND: - if (a1.type == PGBT_NULL) - { - setNullValue(retval); + case PGBENCH_AND: + if (a1.type == PGBT_NULL) + { + setNullValue(retval); + return true; + } + + if (!coerceToBool(&a1, &ba1)) + return false; + + if (!ba1) + { + setBoolValue(retval, false); + return true; + } + + if (!evaluateExpr(thread, st, args->expr, &a2)) + return false; + + if (a2.type == PGBT_NULL) + { + setNullValue(retval); + return true; + } + else if (!coerceToBool(&a2, &ba2)) + return false; + else + { + setBoolValue(retval, ba2); + return true; + } + return true; - } - if (!coerceToBool(&a1, &ba1)) - return false; + case PGBENCH_OR: - if (!ba1) - { - setBoolValue(retval, false); - return true; - } + if (a1.type == PGBT_NULL) + { + setNullValue(retval); + return true; + } - if (!evaluateExpr(thread, st, args->expr, &a2)) - return false; + if (!coerceToBool(&a1, &ba1)) + return false; - if (a2.type == PGBT_NULL) - { - setNullValue(retval); - return true; - } - else if (!coerceToBool(&a2, &ba2)) - return false; - else - { - setBoolValue(retval, ba2); - return true; - } + if (ba1) + { + setBoolValue(retval, true); + return true; + } - return true; + if (!evaluateExpr(thread, st, args->expr, &a2)) + return false; - case PGBENCH_OR: + if (a2.type == PGBT_NULL) + { + setNullValue(retval); + return true; + } + else if (!coerceToBool(&a2, &ba2)) + return false; + else + { + setBoolValue(retval, ba2); + return true; + } - if (a1.type == PGBT_NULL) - { - setNullValue(retval); - return true; - } + case PGBENCH_CASE: + /* when true, execute branch */ + if (valueTruth(&a1)) + return evaluateExpr(thread, st, args->expr, retval); - if (!coerceToBool(&a1, &ba1)) - return false; + /* now args contains next 
condition or final else expression */ + args = args->next; - if (ba1) - { - setBoolValue(retval, true); - return true; - } + /* final else case? */ + if (args->next == NULL) + return evaluateExpr(thread, st, args->expr, retval); - if (!evaluateExpr(thread, st, args->expr, &a2)) - return false; + /* no, another when, proceed */ + return evalLazyFunc(thread, st, PGBENCH_CASE, args, retval); - if (a2.type == PGBT_NULL) - { - setNullValue(retval); - return true; - } - else if (!coerceToBool(&a2, &ba2)) - return false; - else - { - setBoolValue(retval, ba2); - return true; - } - - case PGBENCH_CASE: - /* when true, execute branch */ - if (valueTruth(&a1)) - return evaluateExpr(thread, st, args->expr, retval); - - /* now args contains next condition or final else expression */ - args = args->next; - - /* final else case? */ - if (args->next == NULL) - return evaluateExpr(thread, st, args->expr, retval); - - /* no, another when, proceed */ - return evalLazyFunc(thread, st, PGBENCH_CASE, args, retval); - - default: - /* internal error, cannot get here */ - Assert(0); - break; + default: + /* internal error, cannot get here */ + Assert(0); + break; } return false; } @@ -1803,10 +1806,10 @@ evalStandardFunc(TState *thread, CState *st, PgBenchValue *retval) { /* evaluate all function arguments */ - int nargs = 0; - PgBenchValue vargs[MAX_FARGS]; + int nargs = 0; + PgBenchValue vargs[MAX_FARGS]; PgBenchExprLink *l = args; - bool has_null = false; + bool has_null = false; for (nargs = 0; nargs < MAX_FARGS && l != NULL; nargs++, l = l->next) { @@ -1984,7 +1987,8 @@ evalStandardFunc(TState *thread, CState *st, case PGBENCH_LSHIFT: case PGBENCH_RSHIFT: { - int64 li, ri; + int64 li, + ri; if (!coerceToInt(&vargs[0], &li) || !coerceToInt(&vargs[1], &ri)) return false; @@ -1999,7 +2003,7 @@ evalStandardFunc(TState *thread, CState *st, setIntValue(retval, li << ri); else if (func == PGBENCH_RSHIFT) setIntValue(retval, li >> ri); - else /* cannot get here */ + else /* cannot get here */ Assert(0); return true; @@ -2008,7 +2012,8 @@ evalStandardFunc(TState *thread, CState *st, /* logical operators */ case PGBENCH_NOT: { - bool b; + bool b; + if (!coerceToBool(&vargs[0], &b)) return false; @@ -2062,7 +2067,7 @@ evalStandardFunc(TState *thread, CState *st, fprintf(stderr, "int " INT64_FORMAT "\n", varg->u.ival); else if (varg->type == PGBT_DOUBLE) fprintf(stderr, "double %.*g\n", DBL_DIG, varg->u.dval); - else /* internal error, unexpected type */ + else /* internal error, unexpected type */ Assert(0); *retval = *varg; @@ -2275,7 +2280,11 @@ evalStandardFunc(TState *thread, CState *st, case PGBENCH_IS: { Assert(nargs == 2); - /* note: this simple implementation is more permissive than SQL */ + + /* + * note: this simple implementation is more permissive than + * SQL + */ setBoolValue(retval, vargs[0].type == vargs[1].type && vargs[0].u.bval == vargs[1].u.bval); @@ -2286,8 +2295,8 @@ evalStandardFunc(TState *thread, CState *st, case PGBENCH_HASH_FNV1A: case PGBENCH_HASH_MURMUR2: { - int64 val, - seed; + int64 val, + seed; Assert(nargs == 2); @@ -2935,7 +2944,10 @@ doCustom(TState *thread, CState *st, StatsData *agg) if (command->meta == META_ELIF && conditional_stack_peek(st->cstack) == IFSTATE_TRUE) { - /* elif after executed block, skip eval and wait for endif */ + /* + * elif after executed block, skip eval and wait + * for endif + */ conditional_stack_poke(st->cstack, IFSTATE_IGNORED); goto move_to_end_command; } @@ -2956,18 +2968,21 @@ doCustom(TState *thread, CState *st, StatsData *agg) break; } } - else /* 
if and elif evaluated cases */ + else /* if and elif evaluated cases */ { - bool cond = valueTruth(&result); + bool cond = valueTruth(&result); /* execute or not depending on evaluated condition */ if (command->meta == META_IF) { conditional_stack_push(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE); } - else /* elif */ + else /* elif */ { - /* we should get here only if the "elif" needed evaluation */ + /* + * we should get here only if the "elif" + * needed evaluation + */ Assert(conditional_stack_peek(st->cstack) == IFSTATE_FALSE); conditional_stack_poke(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE); } @@ -2981,10 +2996,10 @@ doCustom(TState *thread, CState *st, StatsData *agg) conditional_stack_poke(st->cstack, IFSTATE_ELSE_FALSE); break; case IFSTATE_FALSE: /* inconsistent if active */ - case IFSTATE_IGNORED: /* inconsistent if active */ - case IFSTATE_NONE: /* else without if */ + case IFSTATE_IGNORED: /* inconsistent if active */ + case IFSTATE_NONE: /* else without if */ case IFSTATE_ELSE_TRUE: /* else after else */ - case IFSTATE_ELSE_FALSE: /* else after else */ + case IFSTATE_ELSE_FALSE: /* else after else */ default: /* dead code if conditional check is ok */ Assert(false); @@ -3038,11 +3053,11 @@ doCustom(TState *thread, CState *st, StatsData *agg) } } - move_to_end_command: + move_to_end_command: + /* - * executing the expression or shell command might - * take a non-negligible amount of time, so reset - * 'now' + * executing the expression or shell command might take a + * non-negligible amount of time, so reset 'now' */ INSTR_TIME_SET_ZERO(now); @@ -3063,7 +3078,10 @@ doCustom(TState *thread, CState *st, StatsData *agg) /* cannot reach end of script in that state */ Assert(command != NULL); - /* if this is conditional related, update conditional state */ + /* + * if this is conditional related, update conditional + * state + */ if (command->type == META_COMMAND && (command->meta == META_IF || command->meta == META_ELIF || @@ -3072,51 +3090,59 @@ doCustom(TState *thread, CState *st, StatsData *agg) { switch (conditional_stack_peek(st->cstack)) { - case IFSTATE_FALSE: - if (command->meta == META_IF || command->meta == META_ELIF) - { - /* we must evaluate the condition */ - st->state = CSTATE_START_COMMAND; - } - else if (command->meta == META_ELSE) - { - /* we must execute next command */ - conditional_stack_poke(st->cstack, IFSTATE_ELSE_TRUE); - st->state = CSTATE_START_COMMAND; - st->command++; - } - else if (command->meta == META_ENDIF) - { - Assert(!conditional_stack_empty(st->cstack)); - conditional_stack_pop(st->cstack); - if (conditional_active(st->cstack)) + case IFSTATE_FALSE: + if (command->meta == META_IF || command->meta == META_ELIF) + { + /* we must evaluate the condition */ st->state = CSTATE_START_COMMAND; - /* else state remains in CSTATE_SKIP_COMMAND */ - st->command++; - } - break; - - case IFSTATE_IGNORED: - case IFSTATE_ELSE_FALSE: - if (command->meta == META_IF) - conditional_stack_push(st->cstack, IFSTATE_IGNORED); - else if (command->meta == META_ENDIF) - { - Assert(!conditional_stack_empty(st->cstack)); - conditional_stack_pop(st->cstack); - if (conditional_active(st->cstack)) + } + else if (command->meta == META_ELSE) + { + /* we must execute next command */ + conditional_stack_poke(st->cstack, IFSTATE_ELSE_TRUE); st->state = CSTATE_START_COMMAND; - } - /* could detect "else" & "elif" after "else" */ - st->command++; - break; + st->command++; + } + else if (command->meta == META_ENDIF) + { + Assert(!conditional_stack_empty(st->cstack)); + 
conditional_stack_pop(st->cstack); + if (conditional_active(st->cstack)) + st->state = CSTATE_START_COMMAND; - case IFSTATE_NONE: - case IFSTATE_TRUE: - case IFSTATE_ELSE_TRUE: - default: - /* inconsistent if inactive, unreachable dead code */ - Assert(false); + /* + * else state remains in + * CSTATE_SKIP_COMMAND + */ + st->command++; + } + break; + + case IFSTATE_IGNORED: + case IFSTATE_ELSE_FALSE: + if (command->meta == META_IF) + conditional_stack_push(st->cstack, IFSTATE_IGNORED); + else if (command->meta == META_ENDIF) + { + Assert(!conditional_stack_empty(st->cstack)); + conditional_stack_pop(st->cstack); + if (conditional_active(st->cstack)) + st->state = CSTATE_START_COMMAND; + } + /* could detect "else" & "elif" after "else" */ + st->command++; + break; + + case IFSTATE_NONE: + case IFSTATE_TRUE: + case IFSTATE_ELSE_TRUE: + default: + + /* + * inconsistent if inactive, unreachable dead + * code + */ + Assert(false); } } else @@ -4184,42 +4210,44 @@ CheckConditional(ParsedScript ps) { /* statically check conditional structure */ ConditionalStack cs = conditional_stack_create(); - int i; - for (i = 0 ; ps.commands[i] != NULL ; i++) + int i; + + for (i = 0; ps.commands[i] != NULL; i++) { - Command *cmd = ps.commands[i]; + Command *cmd = ps.commands[i]; + if (cmd->type == META_COMMAND) { switch (cmd->meta) { - case META_IF: - conditional_stack_push(cs, IFSTATE_FALSE); - break; - case META_ELIF: - if (conditional_stack_empty(cs)) - ConditionError(ps.desc, i+1, "\\elif without matching \\if"); - if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE) - ConditionError(ps.desc, i+1, "\\elif after \\else"); - break; - case META_ELSE: - if (conditional_stack_empty(cs)) - ConditionError(ps.desc, i+1, "\\else without matching \\if"); - if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE) - ConditionError(ps.desc, i+1, "\\else after \\else"); - conditional_stack_poke(cs, IFSTATE_ELSE_FALSE); - break; - case META_ENDIF: - if (!conditional_stack_pop(cs)) - ConditionError(ps.desc, i+1, "\\endif without matching \\if"); - break; - default: - /* ignore anything else... */ - break; + case META_IF: + conditional_stack_push(cs, IFSTATE_FALSE); + break; + case META_ELIF: + if (conditional_stack_empty(cs)) + ConditionError(ps.desc, i + 1, "\\elif without matching \\if"); + if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE) + ConditionError(ps.desc, i + 1, "\\elif after \\else"); + break; + case META_ELSE: + if (conditional_stack_empty(cs)) + ConditionError(ps.desc, i + 1, "\\else without matching \\if"); + if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE) + ConditionError(ps.desc, i + 1, "\\else after \\else"); + conditional_stack_poke(cs, IFSTATE_ELSE_FALSE); + break; + case META_ENDIF: + if (!conditional_stack_pop(cs)) + ConditionError(ps.desc, i + 1, "\\endif without matching \\if"); + break; + default: + /* ignore anything else... 
*/ + break; } } } if (!conditional_stack_empty(cs)) - ConditionError(ps.desc, i+1, "\\if without matching \\endif"); + ConditionError(ps.desc, i + 1, "\\if without matching \\endif"); conditional_stack_destroy(cs); } @@ -4679,6 +4707,7 @@ set_random_seed(const char *seed) { /* rely on current time */ instr_time now; + INSTR_TIME_SET_CURRENT(now); iseed = (unsigned int) INSTR_TIME_GET_MICROSEC(now); } @@ -4698,7 +4727,8 @@ set_random_seed(const char *seed) else { /* parse seed unsigned int value */ - char garbage; + char garbage; + if (sscanf(seed, "%u%c", &iseed, &garbage) != 1) { fprintf(stderr, @@ -5307,7 +5337,7 @@ main(int argc, char **argv) if (var->value.type != PGBT_NO_VALUE) { if (!putVariableValue(&state[i], "startup", - var->name, &var->value)) + var->name, &var->value)) exit(1); } else @@ -5410,10 +5440,10 @@ main(int argc, char **argv) /* set default seed for hash functions */ if (lookupVariable(&state[0], "default_seed") == NULL) { - uint64 seed = ((uint64) (random() & 0xFFFF) << 48) | - ((uint64) (random() & 0xFFFF) << 32) | - ((uint64) (random() & 0xFFFF) << 16) | - (uint64) (random() & 0xFFFF); + uint64 seed = ((uint64) (random() & 0xFFFF) << 48) | + ((uint64) (random() & 0xFFFF) << 32) | + ((uint64) (random() & 0xFFFF) << 16) | + (uint64) (random() & 0xFFFF); for (i = 0; i < nclients; i++) if (!putVariableInt(&state[i], "startup", "default_seed", (int64) seed)) diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index 2b1c4daced..b56995925b 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -2047,8 +2047,8 @@ command_no_begin(const char *query) /* * Commands not allowed within transactions. The statements checked for - * here should be exactly those that call PreventInTransactionBlock() in the - * backend. + * here should be exactly those that call PreventInTransactionBlock() in + * the backend. */ if (wordlen == 6 && pg_strncasecmp(query, "vacuum", 6) == 0) return true; diff --git a/src/bin/psql/mainloop.c b/src/bin/psql/mainloop.c index c06ce3ca09..6caca2e575 100644 --- a/src/bin/psql/mainloop.c +++ b/src/bin/psql/mainloop.c @@ -247,9 +247,8 @@ MainLoop(FILE *source) /* * If we found a command word, check whether the rest of the line * contains only whitespace plus maybe one semicolon. If not, - * ignore the command word after all. These commands are only - * for compatibility with other SQL clients and are not - * documented. + * ignore the command word after all. These commands are only for + * compatibility with other SQL clients and are not documented. */ if (rest_of_line != NULL) { @@ -330,18 +329,17 @@ MainLoop(FILE *source) } /* - * If they typed "\q" in a place where "\q" is not active, - * supply a hint. The text is still added to the query - * buffer. + * If they typed "\q" in a place where "\q" is not active, supply + * a hint. The text is still added to the query buffer. 
*/ if (found_q && query_buf->len != 0 && prompt_status != PROMPT_READY && prompt_status != PROMPT_CONTINUE && prompt_status != PROMPT_PAREN) #ifndef WIN32 - puts(_("Use control-D to quit.")); + puts(_("Use control-D to quit.")); #else - puts(_("Use control-C to quit.")); + puts(_("Use control-C to quit.")); #endif } diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 264728212f..b431efc983 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -1856,13 +1856,13 @@ psql_completion(const char *text, int start, int end) /* ALTER INDEX SET|RESET ( */ else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "(")) COMPLETE_WITH_LIST7("fillfactor", "recheck_on_update", - "fastupdate", "gin_pending_list_limit", /* GIN */ + "fastupdate", "gin_pending_list_limit", /* GIN */ "buffering", /* GiST */ "pages_per_range", "autosummarize" /* BRIN */ ); else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "(")) COMPLETE_WITH_LIST7("fillfactor =", "recheck_on_update =", - "fastupdate =", "gin_pending_list_limit =", /* GIN */ + "fastupdate =", "gin_pending_list_limit =", /* GIN */ "buffering =", /* GiST */ "pages_per_range =", "autosummarize =" /* BRIN */ ); @@ -2511,6 +2511,7 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, " UNION SELECT 'ON'" " UNION SELECT 'CONCURRENTLY'"); + /* * Complete ... INDEX|CONCURRENTLY [] ON with a list of relations * that can indexes can be created on @@ -3458,7 +3459,11 @@ psql_completion(const char *text, int start, int end) /* Complete SET with "TO" */ else if (Matches2("SET", MatchAny)) COMPLETE_WITH_CONST("TO"); - /* Complete ALTER DATABASE|FUNCTION||PROCEDURE|ROLE|ROUTINE|USER ... SET */ + + /* + * Complete ALTER DATABASE|FUNCTION||PROCEDURE|ROLE|ROUTINE|USER ... 
SET + * + */ else if (HeadMatches2("ALTER", "DATABASE|FUNCTION|PROCEDURE|ROLE|ROUTINE|USER") && TailMatches2("SET", MatchAny)) COMPLETE_WITH_LIST2("FROM CURRENT", "TO"); diff --git a/src/fe_utils/conditional.c b/src/fe_utils/conditional.c index 0af80521ce..db2a0a53b3 100644 --- a/src/fe_utils/conditional.c +++ b/src/fe_utils/conditional.c @@ -75,8 +75,9 @@ conditional_stack_depth(ConditionalStack cstack) return -1; else { - IfStackElem *p = cstack->head; + IfStackElem *p = cstack->head; int depth = 0; + while (p != NULL) { depth++; diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h index d1df3033a6..c013d60371 100644 --- a/src/include/access/gin_private.h +++ b/src/include/access/gin_private.h @@ -104,7 +104,7 @@ extern OffsetNumber gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple); extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple, GinNullCategory *category); extern void GinCheckForSerializableConflictIn(Relation relation, - HeapTuple tuple, Buffer buffer); + HeapTuple tuple, Buffer buffer); /* gininsert.c */ extern IndexBuildResult *ginbuild(Relation heap, Relation index, diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h index cf88ff7cb4..695eaa1e49 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -126,7 +126,7 @@ typedef struct xl_heap_truncate Oid dbId; uint32 nrelids; uint8 flags; - Oid relids[FLEXIBLE_ARRAY_MEMBER]; + Oid relids[FLEXIBLE_ARRAY_MEMBER]; } xl_heap_truncate; #define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids)) diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index 892aeca300..15a7b4c212 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -103,10 +103,10 @@ typedef struct BTMetaPageData BlockNumber btm_fastroot; /* current "fast" root location */ uint32 btm_fastlevel; /* tree level of the "fast" root page */ /* following fields are available since page version 3 */ - TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among of - * deleted pages */ - float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples - * during last cleanup */ + TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among of deleted + * pages */ + float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples + * during last cleanup */ } BTMetaPageData; #define BTPageGetMeta(p) \ @@ -115,7 +115,7 @@ typedef struct BTMetaPageData #define BTREE_METAPAGE 0 /* first page is meta */ #define BTREE_MAGIC 0x053162 /* magic number of btree pages */ #define BTREE_VERSION 3 /* current version number */ -#define BTREE_MIN_VERSION 2 /* minimal supported version number */ +#define BTREE_MIN_VERSION 2 /* minimal supported version number */ /* * Maximum size of a btree index entry, including its tuple header. 
@@ -537,7 +537,7 @@ extern void _bt_finish_split(Relation rel, Buffer bbuf, BTStack stack); */ extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level); extern void _bt_update_meta_cleanup_info(Relation rel, - TransactionId oldestBtpoXact, float8 numHeapTuples); + TransactionId oldestBtpoXact, float8 numHeapTuples); extern void _bt_upgrademetapage(Page page); extern Buffer _bt_getroot(Relation rel, int access); extern Buffer _bt_gettrueroot(Relation rel); diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h index ef09611e0d..4022c14a83 100644 --- a/src/include/access/reloptions.h +++ b/src/include/access/reloptions.h @@ -51,7 +51,7 @@ typedef enum relopt_kind RELOPT_KIND_PARTITIONED = (1 << 11), /* if you add a new kind, make sure you update "last_default" too */ RELOPT_KIND_LAST_DEFAULT = RELOPT_KIND_PARTITIONED, - RELOPT_KIND_INDEX = RELOPT_KIND_BTREE|RELOPT_KIND_HASH|RELOPT_KIND_GIN|RELOPT_KIND_SPGIST, + RELOPT_KIND_INDEX = RELOPT_KIND_BTREE | RELOPT_KIND_HASH | RELOPT_KIND_GIN | RELOPT_KIND_SPGIST, /* some compilers treat enums as signed ints, so we can't use 1 << 31 */ RELOPT_KIND_MAX = (1 << 30) } relopt_kind; diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index 18c7dedd5d..e5289b8aa7 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -41,7 +41,7 @@ typedef struct ParallelHeapScanDescData * workers so far. */ bool phs_snapshot_any; /* SnapshotAny, not phs_snapshot_data? */ char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]; -} ParallelHeapScanDescData; +} ParallelHeapScanDescData; typedef struct HeapScanDescData { diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index c5f1ee9ed9..99365c8a45 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -120,7 +120,7 @@ typedef struct SpGistState spgConfigOut config; /* filled in by opclass config method */ SpGistTypeDesc attType; /* type of values to be indexed/restored */ - SpGistTypeDesc attLeafType; /* type of leaf-tuple values */ + SpGistTypeDesc attLeafType; /* type of leaf-tuple values */ SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */ SpGistTypeDesc attLabelType; /* type of node label values */ @@ -181,7 +181,7 @@ typedef struct SpGistCache spgConfigOut config; /* filled in by opclass config method */ SpGistTypeDesc attType; /* type of values to be indexed/restored */ - SpGistTypeDesc attLeafType; /* type of leaf-tuple values */ + SpGistTypeDesc attLeafType; /* type of leaf-tuple values */ SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */ SpGistTypeDesc attLabelType; /* type of node label values */ diff --git a/src/include/access/twophase.h b/src/include/access/twophase.h index f05cde202f..0e932daa48 100644 --- a/src/include/access/twophase.h +++ b/src/include/access/twophase.h @@ -48,7 +48,7 @@ extern bool StandbyTransactionIdIsPrepared(TransactionId xid); extern TransactionId PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p); extern void ParsePrepareRecord(uint8 info, char *xlrec, - xl_xact_parsed_prepare *parsed); + xl_xact_parsed_prepare *parsed); extern void StandbyRecoverPreparedTransactions(void); extern void RecoverPreparedTransactions(void); diff --git a/src/include/access/xact.h b/src/include/access/xact.h index 3661b8d090..083e879d5c 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -312,7 +312,7 @@ typedef struct xl_xact_parsed_commit SharedInvalidationMessage *msgs; 
TransactionId twophase_xid; /* only for 2PC */ - char twophase_gid[GIDSIZE]; /* only for 2PC */ + char twophase_gid[GIDSIZE]; /* only for 2PC */ int nabortrels; /* only for 2PC */ RelFileNode *abortnodes; /* only for 2PC */ @@ -337,7 +337,7 @@ typedef struct xl_xact_parsed_abort RelFileNode *xnodes; TransactionId twophase_xid; /* only for 2PC */ - char twophase_gid[GIDSIZE]; /* only for 2PC */ + char twophase_gid[GIDSIZE]; /* only for 2PC */ XLogRecPtr origin_lsn; TimestampTz origin_timestamp; diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h index d1541bdfcc..dc6c415c58 100644 --- a/src/include/catalog/pg_class.h +++ b/src/include/catalog/pg_class.h @@ -67,7 +67,8 @@ CATALOG(pg_class,1259,RelationRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83,Relat bool relispopulated; /* matview currently holds query results */ char relreplident; /* see REPLICA_IDENTITY_xxx constants */ bool relispartition; /* is relation a partition? */ - Oid relrewrite; /* heap for rewrite during DDL, link to original rel */ + Oid relrewrite; /* heap for rewrite during DDL, link to + * original rel */ TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */ TransactionId relminmxid; /* all multixacts in this rel are >= this. * this is really a MultiXactId */ diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index d46e09cad4..138de84e83 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ -54,7 +54,7 @@ extern void CheckTableNotInUse(Relation rel, const char *stmt); extern void ExecuteTruncate(TruncateStmt *stmt); extern void ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, - DropBehavior behavior, bool restart_seqs); + DropBehavior behavior, bool restart_seqs); extern void SetRelationHasSubclass(Oid relationId, bool relhassubclass); diff --git a/src/include/common/int.h b/src/include/common/int.h index 82e38d4b7b..ff410f0eae 100644 --- a/src/include/common/int.h +++ b/src/include/common/int.h @@ -35,7 +35,7 @@ pg_add_s16_overflow(int16 a, int16 b, int16 *result) if (res > PG_INT16_MAX || res < PG_INT16_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int16) res; @@ -58,7 +58,7 @@ pg_sub_s16_overflow(int16 a, int16 b, int16 *result) if (res > PG_INT16_MAX || res < PG_INT16_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int16) res; @@ -81,7 +81,7 @@ pg_mul_s16_overflow(int16 a, int16 b, int16 *result) if (res > PG_INT16_MAX || res < PG_INT16_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int16) res; @@ -104,7 +104,7 @@ pg_add_s32_overflow(int32 a, int32 b, int32 *result) if (res > PG_INT32_MAX || res < PG_INT32_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int32) res; @@ -127,7 +127,7 @@ pg_sub_s32_overflow(int32 a, int32 b, int32 *result) if (res > PG_INT32_MAX || res < PG_INT32_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int32) res; @@ -150,7 +150,7 @@ pg_mul_s32_overflow(int32 a, int32 b, int32 *result) if (res > PG_INT32_MAX || res < PG_INT32_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid 
spurious warnings */ return true; } *result = (int32) res; @@ -173,7 +173,7 @@ pg_add_s64_overflow(int64 a, int64 b, int64 *result) if (res > PG_INT64_MAX || res < PG_INT64_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int64) res; @@ -182,7 +182,7 @@ pg_add_s64_overflow(int64 a, int64 b, int64 *result) if ((a > 0 && b > 0 && a > PG_INT64_MAX - b) || (a < 0 && b < 0 && a < PG_INT64_MIN - b)) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = a + b; @@ -205,7 +205,7 @@ pg_sub_s64_overflow(int64 a, int64 b, int64 *result) if (res > PG_INT64_MAX || res < PG_INT64_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int64) res; @@ -214,7 +214,7 @@ pg_sub_s64_overflow(int64 a, int64 b, int64 *result) if ((a < 0 && b > 0 && a < PG_INT64_MIN + b) || (a > 0 && b < 0 && a > PG_INT64_MAX + b)) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = a - b; @@ -237,7 +237,7 @@ pg_mul_s64_overflow(int64 a, int64 b, int64 *result) if (res > PG_INT64_MAX || res < PG_INT64_MIN) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = (int64) res; @@ -262,7 +262,7 @@ pg_mul_s64_overflow(int64 a, int64 b, int64 *result) (a < 0 && b > 0 && a < PG_INT64_MIN / b) || (a < 0 && b < 0 && a < PG_INT64_MAX / b))) { - *result = 0x5EED; /* to avoid spurious warnings */ + *result = 0x5EED; /* to avoid spurious warnings */ return true; } *result = a * b; diff --git a/src/include/common/scram-common.h b/src/include/common/scram-common.h index 17373cce3a..dcb5d69078 100644 --- a/src/include/common/scram-common.h +++ b/src/include/common/scram-common.h @@ -17,7 +17,7 @@ /* Name of SCRAM mechanisms per IANA */ #define SCRAM_SHA_256_NAME "SCRAM-SHA-256" -#define SCRAM_SHA_256_PLUS_NAME "SCRAM-SHA-256-PLUS" /* with channel binding */ +#define SCRAM_SHA_256_PLUS_NAME "SCRAM-SHA-256-PLUS" /* with channel binding */ /* Channel binding types */ #define SCRAM_CHANNEL_BINDING_TLS_UNIQUE "tls-unique" diff --git a/src/include/common/string.h b/src/include/common/string.h index 63c3e81a64..78a450192e 100644 --- a/src/include/common/string.h +++ b/src/include/common/string.h @@ -12,6 +12,6 @@ extern bool pg_str_endswith(const char *str, const char *end); extern int strtoint(const char *pg_restrict str, char **pg_restrict endptr, - int base); + int base); #endif /* COMMON_STRING_H */ diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h index f4617a28fa..f7b1f77616 100644 --- a/src/include/executor/execExpr.h +++ b/src/include/executor/execExpr.h @@ -692,9 +692,9 @@ extern void CheckExprStillValid(ExprState *state, ExprContext *econtext); * expression evaluation, reducing code duplication. 
*/ extern void ExecEvalFuncExprFusage(ExprState *state, ExprEvalStep *op, - ExprContext *econtext); + ExprContext *econtext); extern void ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op, - ExprContext *econtext); + ExprContext *econtext); extern void ExecEvalParamExec(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalParamExecParams(Bitmapset *params, EState *estate); diff --git a/src/include/executor/execPartition.h b/src/include/executor/execPartition.h index e81bdc4a0a..8f847050e4 100644 --- a/src/include/executor/execPartition.h +++ b/src/include/executor/execPartition.h @@ -186,9 +186,9 @@ extern int ExecFindPartition(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); extern ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate, - ResultRelInfo *resultRelInfo, - PartitionTupleRouting *proute, - EState *estate, int partidx); + ResultRelInfo *resultRelInfo, + PartitionTupleRouting *proute, + EState *estate, int partidx); extern void ExecInitRoutingInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, @@ -204,7 +204,7 @@ extern HeapTuple ConvertPartitionTupleSlot(TupleConversionMap *map, extern void ExecCleanupTupleRouting(ModifyTableState *mtstate, PartitionTupleRouting *proute); extern PartitionPruneState *ExecSetupPartitionPruneState(PlanState *planstate, - List *partitionpruneinfo); + List *partitionpruneinfo); extern Bitmapset *ExecFindMatchingSubPlans(PartitionPruneState *prunestate); extern Bitmapset *ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubnodes); diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 6214d19380..a7ea3c7d10 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -433,7 +433,7 @@ extern void ExecScanReScan(ScanState *node); extern void ExecInitResultTupleSlotTL(EState *estate, PlanState *planstate); extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate, TupleDesc tupleDesc); extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate, - TupleDesc tupleDesc); + TupleDesc tupleDesc); extern TupleTableSlot *ExecInitNullTupleSlot(EState *estate, TupleDesc tupType); extern TupleDesc ExecTypeFromTL(List *targetList, bool hasoid); diff --git a/src/include/executor/instrument.h b/src/include/executor/instrument.h index 6e3c71759b..6d0efa7222 100644 --- a/src/include/executor/instrument.h +++ b/src/include/executor/instrument.h @@ -59,8 +59,8 @@ typedef struct Instrumentation double ntuples; /* Total tuples produced */ double ntuples2; /* Secondary node-specific tuple counter */ double nloops; /* # of run cycles for this node */ - double nfiltered1; /* # tuples removed by scanqual or joinqual */ - double nfiltered2; /* # tuples removed by "other" quals */ + double nfiltered1; /* # tuples removed by scanqual or joinqual */ + double nfiltered2; /* # tuples removed by "other" quals */ BufferUsage bufusage; /* Total buffer usage */ } Instrumentation; diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h index 376fae0bbc..401fd998f7 100644 --- a/src/include/executor/spi_priv.h +++ b/src/include/executor/spi_priv.h @@ -38,8 +38,10 @@ typedef struct QueryEnvironment *queryEnv; /* query environment setup for SPI level */ /* transaction management support */ - bool atomic; /* atomic execution context, does not allow transactions */ - bool internal_xact; /* SPI-managed transaction boundary, skip cleanup */ + bool atomic; /* atomic execution context, 
does not allow + * transactions */ + bool internal_xact; /* SPI-managed transaction boundary, skip + * cleanup */ } _SPI_connection; /* diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index b71ec8e069..0b874d9763 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -134,7 +134,7 @@ typedef struct TupleTableSlot HeapTupleData tts_minhdr; /* workspace for minimal-tuple-only case */ #define FIELDNO_TUPLETABLESLOT_OFF 14 uint32 tts_off; /* saved state for slot_deform_tuple */ - bool tts_fixedTupleDescriptor; /* descriptor can't be changed */ + bool tts_fixedTupleDescriptor; /* descriptor can't be changed */ } TupleTableSlot; #define TTS_HAS_PHYSICAL_TUPLE(slot) \ diff --git a/src/include/fe_utils/conditional.h b/src/include/fe_utils/conditional.h index 1516207197..9b91de5a3d 100644 --- a/src/include/fe_utils/conditional.h +++ b/src/include/fe_utils/conditional.h @@ -75,7 +75,7 @@ extern ConditionalStack conditional_stack_create(void); extern void conditional_stack_destroy(ConditionalStack cstack); -extern int conditional_stack_depth(ConditionalStack cstack); +extern int conditional_stack_depth(ConditionalStack cstack); extern void conditional_stack_push(ConditionalStack cstack, ifState new_state); diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h index a74ad521b5..7bf06c65e9 100644 --- a/src/include/libpq/libpq.h +++ b/src/include/libpq/libpq.h @@ -106,8 +106,8 @@ extern bool SSLPreferServerCiphers; * prototypes for functions in be-secure-common.c */ extern int run_ssl_passphrase_command(const char *prompt, bool is_server_start, - char *buf, int size); + char *buf, int size); extern bool check_ssl_key_file_permissions(const char *ssl_key_file, - bool isServerStart); + bool isServerStart); #endif /* LIBPQ_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index fe93e78bee..da7f52cab0 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -502,8 +502,8 @@ typedef struct EState int es_num_root_result_relations; /* length of the array */ /* - * The following list contains ResultRelInfos created by the tuple - * routing code for partitions that don't already have one. + * The following list contains ResultRelInfos created by the tuple routing + * code for partitions that don't already have one. 
*/ List *es_tuple_routing_result_relations; @@ -836,7 +836,8 @@ typedef struct SubPlanState MemoryContext hashtempcxt; /* temp memory context for hash tables */ ExprContext *innerecontext; /* econtext for computing inner tuples */ AttrNumber *keyColIdx; /* control data for hash tables */ - Oid *tab_eq_funcoids;/* equality func oids for table datatype(s) */ + Oid *tab_eq_funcoids; /* equality func oids for table + * datatype(s) */ FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */ FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */ FmgrInfo *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */ @@ -1087,8 +1088,8 @@ struct AppendState PlanState **appendplans; /* array of PlanStates for my inputs */ int as_nplans; int as_whichplan; - int as_first_partial_plan; /* Index of 'appendplans' containing - * the first partial plan */ + int as_first_partial_plan; /* Index of 'appendplans' containing + * the first partial plan */ ParallelAppendState *as_pstate; /* parallel coordination info */ Size pstate_len; /* size of parallel coordination info */ struct PartitionPruneState *as_prune_state; @@ -1958,8 +1959,8 @@ typedef struct WindowAggState WindowStatePerFunc perfunc; /* per-window-function information */ WindowStatePerAgg peragg; /* per-plain-aggregate information */ - ExprState *partEqfunction; /* equality funcs for partition columns */ - ExprState *ordEqfunction; /* equality funcs for ordering columns */ + ExprState *partEqfunction; /* equality funcs for partition columns */ + ExprState *ordEqfunction; /* equality funcs for ordering columns */ Tuplestorestate *buffer; /* stores rows of current partition */ int current_ptr; /* read pointer # for current row */ int framehead_ptr; /* read pointer # for frame head, if used */ @@ -2037,7 +2038,7 @@ typedef struct WindowAggState typedef struct UniqueState { PlanState ps; /* its first field is NodeTag */ - ExprState *eqfunction; /* tuple equality qual */ + ExprState *eqfunction; /* tuple equality qual */ } UniqueState; /* ---------------- diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index cbbe065078..6390f7e8c1 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -655,8 +655,8 @@ typedef struct ColumnDef Node *raw_default; /* default value (untransformed parse tree) */ Node *cooked_default; /* default value (transformed expr tree) */ char identity; /* attidentity setting */ - RangeVar *identitySequence; /* to store identity sequence name for ALTER - * TABLE ... ADD COLUMN */ + RangeVar *identitySequence; /* to store identity sequence name for + * ALTER TABLE ... 
ADD COLUMN */ CollateClause *collClause; /* untransformed COLLATE spec, if any */ Oid collOid; /* collation OID (InvalidOid if not set) */ List *constraints; /* other constraints on column */ @@ -2974,7 +2974,7 @@ typedef struct TransactionStmt NodeTag type; TransactionStmtKind kind; /* see above */ List *options; /* for BEGIN/START commands */ - char *savepoint_name; /* for savepoint commands */ + char *savepoint_name; /* for savepoint commands */ char *gid; /* for two-phase-commit related commands */ } TransactionStmt; diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index 8b153a9d4e..3b28d1994f 100644 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -309,9 +309,9 @@ typedef struct PlannerInfo Index qual_security_level; /* minimum security_level for quals */ /* Note: qual_security_level is zero if there are no securityQuals */ - InheritanceKind inhTargetKind; /* indicates if the target relation is an - * inheritance child or partition or a - * partitioned table */ + InheritanceKind inhTargetKind; /* indicates if the target relation is an + * inheritance child or partition or a + * partitioned table */ bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */ bool hasLateralRTEs; /* true if any RTEs are marked LATERAL */ bool hasDeletedRTEs; /* true if any RTE was deleted from jointree */ diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h index 55e6a8488f..77ca7ff837 100644 --- a/src/include/optimizer/cost.h +++ b/src/include/optimizer/cost.h @@ -54,7 +54,7 @@ extern PGDLLIMPORT double parallel_tuple_cost; extern PGDLLIMPORT double parallel_setup_cost; extern PGDLLIMPORT int effective_cache_size; extern PGDLLIMPORT Cost disable_cost; -extern PGDLLIMPORT int max_parallel_workers_per_gather; +extern PGDLLIMPORT int max_parallel_workers_per_gather; extern PGDLLIMPORT bool enable_seqscan; extern PGDLLIMPORT bool enable_indexscan; extern PGDLLIMPORT bool enable_indexonlyscan; @@ -72,7 +72,7 @@ extern PGDLLIMPORT bool enable_partitionwise_aggregate; extern PGDLLIMPORT bool enable_parallel_append; extern PGDLLIMPORT bool enable_parallel_hash; extern PGDLLIMPORT bool enable_partition_pruning; -extern PGDLLIMPORT int constraint_exclusion; +extern PGDLLIMPORT int constraint_exclusion; extern double clamp_row_est(double nrows); extern double index_pages_fetched(double tuples_fetched, BlockNumber pages, diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h index f181586a53..cafde307ad 100644 --- a/src/include/optimizer/paths.h +++ b/src/include/optimizer/paths.h @@ -21,9 +21,9 @@ * allpaths.c */ extern PGDLLIMPORT bool enable_geqo; -extern PGDLLIMPORT int geqo_threshold; -extern PGDLLIMPORT int min_parallel_table_scan_size; -extern PGDLLIMPORT int min_parallel_index_scan_size; +extern PGDLLIMPORT int geqo_threshold; +extern PGDLLIMPORT int min_parallel_table_scan_size; +extern PGDLLIMPORT int min_parallel_index_scan_size; /* Hook for plugins to get control in set_rel_pathlist() */ typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root, @@ -60,7 +60,7 @@ extern int compute_parallel_worker(RelOptInfo *rel, double heap_pages, extern void create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual); extern void generate_partitionwise_join_paths(PlannerInfo *root, - RelOptInfo *rel); + RelOptInfo *rel); #ifdef OPTIMIZER_DEBUG extern void debug_print_rel(PlannerInfo *root, RelOptInfo *rel); diff --git a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h index 
2e3810fc32..11f9046e38 100644 --- a/src/include/parser/parse_func.h +++ b/src/include/parser/parse_func.h @@ -32,8 +32,8 @@ typedef enum extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, - Node *last_srf, FuncCall *fn, bool proc_call, - int location); + Node *last_srf, FuncCall *fn, bool proc_call, + int location); extern FuncDetailCode func_get_detail(List *funcname, List *fargs, List *fargnames, diff --git a/src/include/replication/logical.h b/src/include/replication/logical.h index 619c5f4d73..c25ac1fa85 100644 --- a/src/include/replication/logical.h +++ b/src/include/replication/logical.h @@ -46,11 +46,11 @@ typedef struct LogicalDecodingContext struct SnapBuild *snapshot_builder; /* - * Marks the logical decoding context as fast forward decoding one. - * Such a context does not have plugin loaded so most of the the following + * Marks the logical decoding context as fast forward decoding one. Such a + * context does not have plugin loaded so most of the the following * properties are unused. */ - bool fast_forward; + bool fast_forward; OutputPluginCallbacks callbacks; OutputPluginOptions options; diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h index 92e88d3127..8192f79ce3 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -98,9 +98,9 @@ extern void logicalrep_write_delete(StringInfo out, Relation rel, extern LogicalRepRelId logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup); extern void logicalrep_write_truncate(StringInfo out, int nrelids, Oid relids[], - bool cascade, bool restart_seqs); + bool cascade, bool restart_seqs); extern List *logicalrep_read_truncate(StringInfo in, - bool *cascade, bool *restart_seqs); + bool *cascade, bool *restart_seqs); extern void logicalrep_write_rel(StringInfo out, Relation rel); extern LogicalRepRelation *logicalrep_read_rel(StringInfo in); extern void logicalrep_write_typ(StringInfo out, Oid typoid); diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 3867ce8950..1c7982958e 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -101,8 +101,8 @@ typedef struct ReorderBufferChange } tp; /* - * Truncate data for REORDER_BUFFER_CHANGE_TRUNCATE representing - * one set of relations to be truncated. + * Truncate data for REORDER_BUFFER_CHANGE_TRUNCATE representing one + * set of relations to be truncated. */ struct { @@ -110,7 +110,7 @@ typedef struct ReorderBufferChange bool cascade; bool restart_seqs; Oid *relids; - } truncate; + } truncate; /* Message with arbitrary data. */ struct diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h index 76268ceb23..5913b580c2 100644 --- a/src/include/replication/walreceiver.h +++ b/src/include/replication/walreceiver.h @@ -110,8 +110,8 @@ typedef struct char conninfo[MAXCONNINFO]; /* - * Host name (this can be a host name, an IP address, or a directory - * path) and port number of the active replication connection. + * Host name (this can be a host name, an IP address, or a directory path) + * and port number of the active replication connection. 
*/ char sender_host[NI_MAXHOST]; int sender_port; @@ -206,8 +206,8 @@ typedef WalReceiverConn *(*walrcv_connect_fn) (const char *conninfo, bool logica typedef void (*walrcv_check_conninfo_fn) (const char *conninfo); typedef char *(*walrcv_get_conninfo_fn) (WalReceiverConn *conn); typedef void (*walrcv_get_senderinfo_fn) (WalReceiverConn *conn, - char **sender_host, - int *sender_port); + char **sender_host, + int *sender_port); typedef char *(*walrcv_identify_system_fn) (WalReceiverConn *conn, TimeLineID *primary_tli, int *server_version); diff --git a/src/include/storage/reinit.h b/src/include/storage/reinit.h index 22639dab4c..a62703c647 100644 --- a/src/include/storage/reinit.h +++ b/src/include/storage/reinit.h @@ -20,7 +20,7 @@ extern void ResetUnloggedRelations(int op); extern bool parse_filename_for_nontemp_relation(const char *name, - int *oidchars, ForkNumber *fork); + int *oidchars, ForkNumber *fork); #define UNLOGGED_RELATION_CLEANUP 0x0001 #define UNLOGGED_RELATION_INIT 0x0002 diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h index 880d19311a..da6767917c 100644 --- a/src/include/tcop/utility.h +++ b/src/include/tcop/utility.h @@ -20,7 +20,8 @@ typedef enum { PROCESS_UTILITY_TOPLEVEL, /* toplevel interactive command */ PROCESS_UTILITY_QUERY, /* a complete query, but not toplevel */ - PROCESS_UTILITY_QUERY_NONATOMIC, /* a complete query, nonatomic execution context */ + PROCESS_UTILITY_QUERY_NONATOMIC, /* a complete query, nonatomic + * execution context */ PROCESS_UTILITY_SUBCOMMAND /* a portion of a query */ } ProcessUtilityContext; diff --git a/src/include/tsearch/ts_utils.h b/src/include/tsearch/ts_utils.h index 73e969fe9c..d59e38c36b 100644 --- a/src/include/tsearch/ts_utils.h +++ b/src/include/tsearch/ts_utils.h @@ -62,9 +62,9 @@ typedef void (*PushFunction) (Datum opaque, TSQueryParserState state, #define P_TSQ_WEB (1 << 1) extern TSQuery parse_tsquery(char *buf, - PushFunction pushval, - Datum opaque, - int flags); + PushFunction pushval, + Datum opaque, + int flags); /* Functions for use by PushFunction implementations */ extern void pushValue(TSQueryParserState state, diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h index b28201c2bc..6b483a15a6 100644 --- a/src/include/utils/jsonapi.h +++ b/src/include/utils/jsonapi.h @@ -136,12 +136,13 @@ extern bool IsValidJsonNumber(const char *str, int len); * Flag types for iterate_json(b)_values to specify what elements from a * json(b) document we want to iterate. 
*/ -typedef enum JsonToIndex { - jtiKey = 0x01, - jtiString = 0x02, - jtiNumeric = 0x04, - jtiBool = 0x08, - jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool +typedef enum JsonToIndex +{ + jtiKey = 0x01, + jtiString = 0x02, + jtiNumeric = 0x04, + jtiBool = 0x08, + jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool } JsonToIndex; /* an action that will be applied to each value in iterate_json(b)_vaues functions */ @@ -152,9 +153,9 @@ typedef text *(*JsonTransformStringValuesAction) (void *state, char *elem_value, extern uint32 parse_jsonb_index_flags(Jsonb *jb); extern void iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state, - JsonIterateStringValuesAction action); + JsonIterateStringValuesAction action); extern void iterate_json_values(text *json, uint32 flags, void *action_state, - JsonIterateStringValuesAction action); + JsonIterateStringValuesAction action); extern Jsonb *transform_jsonb_string_values(Jsonb *jsonb, void *action_state, JsonTransformStringValuesAction transform_action); extern text *transform_json_string_values(text *json, void *action_state, diff --git a/src/include/utils/partcache.h b/src/include/utils/partcache.h index c1d029fdb3..873c60fafd 100644 --- a/src/include/utils/partcache.h +++ b/src/include/utils/partcache.h @@ -44,7 +44,7 @@ typedef struct PartitionKeyData bool *parttypbyval; char *parttypalign; Oid *parttypcoll; -} PartitionKeyData; +} PartitionKeyData; extern void RelationBuildPartitionKey(Relation relation); extern void RelationBuildPartitionDesc(Relation rel); diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h index b4a259dcf8..e4929b936e 100644 --- a/src/include/utils/portal.h +++ b/src/include/utils/portal.h @@ -116,7 +116,7 @@ typedef struct PortalData /* Bookkeeping data */ const char *name; /* portal's name */ const char *prepStmtName; /* source prepared statement (NULL if none) */ - MemoryContext portalContext;/* subsidiary memory for portal */ + MemoryContext portalContext; /* subsidiary memory for portal */ ResourceOwner resowner; /* resources owned by portal */ void (*cleanup) (Portal portal); /* cleanup hook */ diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 1d0461d295..c97f9d1b43 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -116,7 +116,7 @@ typedef struct RelationData Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */ Bitmapset *rd_pkattr; /* cols included in primary key */ Bitmapset *rd_idattr; /* included in replica identity index */ - Bitmapset *rd_projidx; /* Oids of projection indexes */ + Bitmapset *rd_projidx; /* Oids of projection indexes */ PublicationActions *rd_pubactions; /* publication actions */ @@ -223,7 +223,7 @@ typedef struct ForeignKeyCacheInfo typedef struct GenericIndexOpts { int32 vl_len_; - bool recheck_on_update; + bool recheck_on_update; } GenericIndexOpts; /* diff --git a/src/include/utils/resowner_private.h b/src/include/utils/resowner_private.h index 44dc99eb26..a6e8eb71ab 100644 --- a/src/include/utils/resowner_private.h +++ b/src/include/utils/resowner_private.h @@ -91,8 +91,8 @@ extern void ResourceOwnerForgetDSM(ResourceOwner owner, /* support for JITContext management */ extern void ResourceOwnerEnlargeJIT(ResourceOwner owner); extern void ResourceOwnerRememberJIT(ResourceOwner owner, - Datum handle); + Datum handle); extern void ResourceOwnerForgetJIT(ResourceOwner owner, - Datum handle); + Datum handle); #endif /* RESOWNER_PRIVATE_H */ diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index 
d2e6754f04..32908b6625 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -54,7 +54,7 @@ typedef struct SortCoordinateData /* Private opaque state (points to shared memory) */ Sharedsort *sharedsort; -} SortCoordinateData; +} SortCoordinateData; typedef struct SortCoordinateData *SortCoordinate; diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c index b43b36260a..bd8553f1f5 100644 --- a/src/interfaces/ecpg/ecpglib/data.c +++ b/src/interfaces/ecpg/ecpglib/data.c @@ -464,15 +464,22 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (varcharsize == 0 || varcharsize > size) { - /* compatibility mode, blank pad and null terminate char array */ + /* + * compatibility mode, blank pad and null + * terminate char array + */ if (ORACLE_MODE(compat) && (type == ECPGt_char || type == ECPGt_unsigned_char)) { memset(str, ' ', varcharsize); memcpy(str, pval, size); - str[varcharsize-1] = '\0'; + str[varcharsize - 1] = '\0'; - /* compatibility mode empty string gets -1 indicator but no warning */ - if (size == 0) { + /* + * compatibility mode empty string gets -1 + * indicator but no warning + */ + if (size == 0) + { /* truncation */ switch (ind_type) { @@ -488,12 +495,12 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case ECPGt_unsigned_long: *((long *) (ind + ind_offset * act_tuple)) = -1; break; - #ifdef HAVE_LONG_LONG_INT +#ifdef HAVE_LONG_LONG_INT case ECPGt_long_long: case ECPGt_unsigned_long_long: *((long long int *) (ind + ind_offset * act_tuple)) = -1; break; - #endif /* HAVE_LONG_LONG_INT */ +#endif /* HAVE_LONG_LONG_INT */ default: break; } @@ -523,7 +530,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (ORACLE_MODE(compat) && (varcharsize - 1) < size) { if (type == ECPGt_char || type == ECPGt_unsigned_char) - str[varcharsize-1] = '\0'; + str[varcharsize - 1] = '\0'; } if (varcharsize < size || (ORACLE_MODE(compat) && (varcharsize - 1) < size)) diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c index 8fb731fcfb..7fdc4ee596 100644 --- a/src/interfaces/ecpg/preproc/ecpg.c +++ b/src/interfaces/ecpg/preproc/ecpg.c @@ -479,7 +479,8 @@ main(int argc, char *const argv[]) } } - if (output_filename && out_option == 0) { + if (output_filename && out_option == 0) + { free(output_filename); output_filename = NULL; } diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c index fa1a05c302..253873dd4e 100644 --- a/src/interfaces/ecpg/preproc/type.c +++ b/src/interfaces/ecpg/preproc/type.c @@ -611,14 +611,16 @@ ECPGdump_a_struct(FILE *o, const char *name, const char *ind_name, char *arrsize if (ind_p != NULL && ind_p != &struct_no_indicator) { ind_p = ind_p->next; - if (ind_p == NULL && p->next != NULL) { + if (ind_p == NULL && p->next != NULL) + { mmerror(PARSE_ERROR, ET_WARNING, "indicator struct \"%s\" has too few members", ind_name); ind_p = &struct_no_indicator; } } } - if (ind_type != NULL && ind_p != NULL && ind_p != &struct_no_indicator) { + if (ind_type != NULL && ind_p != NULL && ind_p != &struct_no_indicator) + { mmerror(PARSE_ERROR, ET_WARNING, "indicator struct \"%s\" has too many members", ind_name); } diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index f3057e9d6f..a7e969d7c1 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -266,7 +266,7 @@ static const internalPQconninfoOption 
PQconninfoOptions[] = { {"scram_channel_binding", NULL, DefaultSCRAMChannelBinding, NULL, "SCRAM-Channel-Binding", "D", - 21, /* sizeof("tls-server-end-point") == 21 */ + 21, /* sizeof("tls-server-end-point") == 21 */ offsetof(struct pg_conn, scram_channel_binding)}, /* diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index 2e2f1074fc..43640e3799 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -63,8 +63,8 @@ static int verify_cb(int ok, X509_STORE_CTX *ctx); static int openssl_verify_peer_name_matches_certificate_name(PGconn *conn, - ASN1_STRING *name, - char **store_name); + ASN1_STRING *name, + char **store_name); static void destroy_ssl_system(void); static int initialize_SSL(PGconn *conn); static PostgresPollingStatusType open_client_SSL(PGconn *); @@ -560,8 +560,8 @@ pgtls_verify_peer_name_matches_certificate_guts(PGconn *conn, (*names_examined)++; rc = openssl_verify_peer_name_matches_certificate_name(conn, - name->d.dNSName, - &alt_name); + name->d.dNSName, + &alt_name); if (alt_name) { @@ -599,10 +599,10 @@ pgtls_verify_peer_name_matches_certificate_guts(PGconn *conn, { (*names_examined)++; rc = openssl_verify_peer_name_matches_certificate_name( - conn, - X509_NAME_ENTRY_get_data( - X509_NAME_get_entry(subject_name, cn_index)), - first_name); + conn, + X509_NAME_ENTRY_get_data( + X509_NAME_get_entry(subject_name, cn_index)), + first_name); } } } @@ -1194,6 +1194,7 @@ initialize_SSL(PGconn *conn) #ifdef SSL_OP_NO_COMPRESSION if (conn->sslcompression && conn->sslcompression[0] == '0') SSL_set_options(conn->ssl, SSL_OP_NO_COMPRESSION); + /* * Mainline OpenSSL introduced SSL_clear_options() before * SSL_OP_NO_COMPRESSION, so this following #ifdef should not be diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index eba23dcecc..9a586ff25a 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -349,7 +349,7 @@ struct pg_conn * retransmits */ char *keepalives_count; /* maximum number of TCP keepalive * retransmits */ - char *scram_channel_binding; /* SCRAM channel binding type */ + char *scram_channel_binding; /* SCRAM channel binding type */ char *sslmode; /* SSL mode (require,prefer,allow,disable) */ char *sslcompression; /* SSL compression (0 or 1) */ char *sslkey; /* client key filename */ @@ -742,8 +742,8 @@ extern char *pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len); * */ extern int pgtls_verify_peer_name_matches_certificate_guts(PGconn *conn, - int *names_examined, - char **first_name); + int *names_examined, + char **first_name); /* === miscellaneous macros === */ diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index 2f41071dd3..721234d6d2 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -545,7 +545,7 @@ do_compile(FunctionCallInfo fcinfo, { if (rettypeid == VOIDOID || rettypeid == RECORDOID) - /* okay */ ; + /* okay */ ; else if (rettypeid == TRIGGEROID || rettypeid == EVTTRIGGEROID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -563,9 +563,9 @@ do_compile(FunctionCallInfo fcinfo, function->fn_rettyplen = typeStruct->typlen; /* - * install $0 reference, but only for polymorphic return - * types, and not when the return is specified through an - * output parameter. + * install $0 reference, but only for polymorphic return types, + * and not when the return is specified through an output + * parameter. 
*/ if (IsPolymorphicType(procStruct->prorettype) && num_out_args == 0) diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index ae1898ec18..047fce372e 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -258,7 +258,7 @@ static int exec_stmt_assign(PLpgSQL_execstate *estate, static int exec_stmt_perform(PLpgSQL_execstate *estate, PLpgSQL_stmt_perform *stmt); static int exec_stmt_call(PLpgSQL_execstate *estate, - PLpgSQL_stmt_call *stmt); + PLpgSQL_stmt_call *stmt); static int exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt); static int exec_stmt_if(PLpgSQL_execstate *estate, @@ -306,7 +306,7 @@ static int exec_stmt_commit(PLpgSQL_execstate *estate, static int exec_stmt_rollback(PLpgSQL_execstate *estate, PLpgSQL_stmt_rollback *stmt); static int exec_stmt_set(PLpgSQL_execstate *estate, - PLpgSQL_stmt_set *stmt); + PLpgSQL_stmt_set *stmt); static void plpgsql_estate_setup(PLpgSQL_execstate *estate, PLpgSQL_function *func, @@ -315,8 +315,8 @@ static void plpgsql_estate_setup(PLpgSQL_execstate *estate, static void exec_eval_cleanup(PLpgSQL_execstate *estate); static void exec_prepare_plan(PLpgSQL_execstate *estate, - PLpgSQL_expr *expr, int cursorOptions, - bool keepplan); + PLpgSQL_expr *expr, int cursorOptions, + bool keepplan); static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr *expr); static void exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan); static void exec_check_rw_parameter(PLpgSQL_expr *expr, int target_dno); @@ -2183,9 +2183,9 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) nfields = 0; i = 0; - foreach (lc, funcexpr->args) + foreach(lc, funcexpr->args) { - Node *n = lfirst(lc); + Node *n = lfirst(lc); if (argmodes && argmodes[i] == PROARGMODE_INOUT) { diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c index 7c8c7dee87..47ed95dcc6 100644 --- a/src/pl/plpython/plpy_exec.c +++ b/src/pl/plpython/plpy_exec.c @@ -199,10 +199,10 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) error_context_stack = &plerrcontext; /* - * For a procedure or function declared to return void, the Python return value - * must be None. For void-returning functions, we also treat a None - * return value as a special "void datum" rather than NULL (as is the - * case for non-void-returning functions). + * For a procedure or function declared to return void, the Python + * return value must be None. For void-returning functions, we also + * treat a None return value as a special "void datum" rather than + * NULL (as is the case for non-void-returning functions). 
*/ if (proc->result.typoid == VOIDOID) { diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index 558cabc949..07fdc75127 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -312,9 +312,9 @@ static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp, static int pltcl_subtransaction(ClientData cdata, Tcl_Interp *interp, int objc, Tcl_Obj *const objv[]); static int pltcl_commit(ClientData cdata, Tcl_Interp *interp, - int objc, Tcl_Obj *const objv[]); + int objc, Tcl_Obj *const objv[]); static int pltcl_rollback(ClientData cdata, Tcl_Interp *interp, - int objc, Tcl_Obj *const objv[]); + int objc, Tcl_Obj *const objv[]); static void pltcl_subtrans_begin(MemoryContext oldcontext, ResourceOwner oldowner); diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter.c b/src/test/modules/test_bloomfilter/test_bloomfilter.c index 358afbefa3..3b04c65bd0 100644 --- a/src/test/modules/test_bloomfilter/test_bloomfilter.c +++ b/src/test/modules/test_bloomfilter/test_bloomfilter.c @@ -93,7 +93,7 @@ create_and_test_bloom(int power, int64 nelements, int callerseed) nfalsepos = nfalsepos_for_missing_strings(filter, nelements); ereport((nfalsepos > nelements * FPOSITIVE_THRESHOLD) ? WARNING : DEBUG1, - (errmsg_internal("seed: " UINT64_FORMAT " false positives: " INT64_FORMAT " (%.6f%%) bitset %.2f%% set" , + (errmsg_internal("seed: " UINT64_FORMAT " false positives: " INT64_FORMAT " (%.6f%%) bitset %.2f%% set", seed, nfalsepos, (double) nfalsepos / nelements, 100.0 * bloom_prop_bits_set(filter))));