diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 11d7ec067a..2e41b34d8d 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -25,6 +25,7 @@
 #include "catalog/pg_am.h"
 #include "commands/vacuum.h"
 #include "miscadmin.h"
+#include "port/pg_bitutils.h"
 #include "postmaster/autovacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/lmgr.h"
@@ -503,10 +504,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
 		 * initially. Make it a power of 2 to avoid wasting memory when
 		 * resizing (since palloc likes powers of 2).
 		 */
-		collector->lentuples = 16;
-		while (collector->lentuples < nentries)
-			collector->lentuples *= 2;
-
+		collector->lentuples = pg_nextpower2_32(Max(16, nentries));
 		collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples);
 	}
 	else if (collector->lentuples < collector->ntuples + nentries)
@@ -516,11 +514,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
 		 * overflow, though we could get to a value that exceeds
 		 * MaxAllocSize/sizeof(IndexTuple), causing an error in repalloc.
 		 */
-		do
-		{
-			collector->lentuples *= 2;
-		} while (collector->lentuples < collector->ntuples + nentries);
-
+		collector->lentuples = pg_nextpower2_32(collector->ntuples + nentries);
 		collector->tuples = (IndexTuple *) repalloc(collector->tuples,
 													sizeof(IndexTuple) * collector->lentuples);
 	}
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b6d5084908..c881dc1de8 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -831,9 +831,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
 		dbatch = Min(dbatch, max_pointers);
 		minbatch = (int) dbatch;
-		nbatch = 2;
-		while (nbatch < minbatch)
-			nbatch <<= 1;
+		nbatch = pg_nextpower2_32(Max(2, minbatch));
 	}
 
 	Assert(nbuckets > 0);
@@ -2272,9 +2270,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
 		 * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
 		 * since we limit pg_statistic entries to much less than that.
 		 */
-		nbuckets = 2;
-		while (nbuckets <= mcvsToUse)
-			nbuckets <<= 1;
+		nbuckets = pg_nextpower2_32(mcvsToUse + 1);
 		/* use two more bits just to help avoid collisions */
 		nbuckets <<= 2;
 
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index bd0c58cd81..80fa8c84e4 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -18,6 +18,7 @@
 #include "postgres.h"
 
 #include "nodes/pg_list.h"
+#include "port/pg_bitutils.h"
 #include "utils/memdebug.h"
 #include "utils/memutils.h"
 
@@ -119,9 +120,7 @@ new_list(NodeTag type, int min_size)
 	 * that's more than twice the size of an existing list, so the size limits
 	 * within palloc will ensure that we don't overflow here.
 	 */
-	max_size = 8;				/* semi-arbitrary small power of 2 */
-	while (max_size < min_size + LIST_HEADER_OVERHEAD)
-		max_size *= 2;
+	max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD));
 	max_size -= LIST_HEADER_OVERHEAD;
 #else
 
@@ -160,12 +159,12 @@ enlarge_list(List *list, int min_size)
 
 	/*
 	 * As above, we prefer power-of-two total allocations; but here we need
-	 * not account for list header overhead.  The existing max length might
-	 * not be a power of 2, so don't rely on that.
+	 * not account for list header overhead.
 	 */
-	new_max_len = 16;			/* semi-arbitrary small power of 2 */
-	while (new_max_len < min_size)
-		new_max_len *= 2;
+
+	/* clamp the minimum value to 16, a semi-arbitrary small power of 2 */
+	new_max_len = pg_nextpower2_32(Max(16, min_size));
+
 #else
 	/* As above, don't allocate anything extra */
 	new_max_len = min_size;
diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c
index 977d6f3e2e..4b86f0ab2d 100644
--- a/src/backend/statistics/mvdistinct.c
+++ b/src/backend/statistics/mvdistinct.c
@@ -576,15 +576,7 @@ n_choose_k(int n, int k)
 static int
 num_combinations(int n)
 {
-	int			k;
-	int			ncombs = 1;
-
-	for (k = 1; k <= n; k++)
-		ncombs *= 2;
-
-	ncombs -= (n + 1);
-
-	return ncombs;
+	return (1 << n) - (n + 1);
 }
 
 /*
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 7a4a5aaa86..11987c8f3b 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -24,6 +24,7 @@
 #include "nodes/nodeFuncs.h"
 #include "nodes/supportnodes.h"
 #include "optimizer/optimizer.h"
+#include "port/pg_bitutils.h"
 #include "utils/array.h"
 #include "utils/arrayaccess.h"
 #include "utils/builtins.h"
@@ -5313,9 +5314,7 @@ accumArrayResultArr(ArrayBuildStateArr *astate,
 		memcpy(&astate->lbs[1], lbs, ndims * sizeof(int));
 
 		/* Allocate at least enough data space for this item */
-		astate->abytes = 1024;
-		while (astate->abytes <= ndatabytes)
-			astate->abytes *= 2;
+		astate->abytes = pg_nextpower2_32(Max(1024, ndatabytes + 1));
 		astate->data = (char *) palloc(astate->abytes);
 	}
 	else
@@ -5362,9 +5361,7 @@ accumArrayResultArr(ArrayBuildStateArr *astate,
 			 * First input with nulls; we must retrospectively handle any
 			 * previous inputs by marking all their items non-null.
 			 */
-			astate->aitems = 256;
-			while (astate->aitems <= newnitems)
-				astate->aitems *= 2;
+			astate->aitems = pg_nextpower2_32(Max(256, newnitems + 1));
 			astate->nullbitmap = (bits8 *) palloc((astate->aitems + 7) / 8);
 			array_bitmap_copy(astate->nullbitmap, 0, NULL, 0,