Rename shadowed local variables

In a similar effort to f01592f91, here we mostly rename shadowed local
variables to remove the warnings produced when compiling with
-Wshadow=compatible-local.

This fixes 63 warnings and leaves just 5.

Author: Justin Pryzby, David Rowley
Reviewed-by: Justin Pryzby
Discussion: https://postgr.es/m/20220817145434.GC26426%40telsasoft.com
David Rowley committed 2022-10-05 21:01:41 +13:00
parent 839c2520a7
commit 2d0bbedda7
39 changed files with 220 additions and 226 deletions
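
For readers unfamiliar with the warning class being silenced, here is a minimal, self-contained sketch (not taken from this commit; the file and function names are invented) of the kind of shadowing that GCC's -Wshadow=compatible-local reports, and the plain rename used throughout this commit to silence it. In a PostgreSQL build the flag can typically be supplied via CFLAGS at configure time or via COPT at make time.

/*
 * shadow_demo.c -- hypothetical illustration, not part of this commit.
 *
 * Compile with:  gcc -Wshadow=compatible-local -c shadow_demo.c
 */
int
sum_twice(const int *vals, int n)
{
	int		i;
	int		total = 0;

	for (i = 0; i < n; i++)
		total += vals[i];

	{
		int		i;		/* warning: declaration of 'i' shadows a previous local */

		for (i = 0; i < n; i++)
			total += vals[i];
	}

	return total;
}

/*
 * Renaming the inner variable removes the warning without changing
 * behaviour, e.g.:
 *
 *		int		j;
 *
 *		for (j = 0; j < n; j++)
 *			total += vals[j];
 */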


@ -3059,16 +3059,16 @@ brin_minmax_multi_summary_out(PG_FUNCTION_ARGS)
char *a,
*b;
text *c;
StringInfoData str;
StringInfoData buf;
initStringInfo(&str);
initStringInfo(&buf);
a = OutputFunctionCall(&fmgrinfo, ranges_deserialized->values[idx++]);
b = OutputFunctionCall(&fmgrinfo, ranges_deserialized->values[idx++]);
appendStringInfo(&str, "%s ... %s", a, b);
appendStringInfo(&buf, "%s ... %s", a, b);
c = cstring_to_text_with_len(str.data, str.len);
c = cstring_to_text_with_len(buf.data, buf.len);
astate_values = accumArrayResult(astate_values,
PointerGetDatum(c),


@ -397,7 +397,7 @@ restartScanEntry:
{
BlockNumber rootPostingTree = GinGetPostingTree(itup);
GinBtreeStack *stack;
Page page;
Page entrypage;
ItemPointerData minItem;
/*
@ -428,13 +428,13 @@ restartScanEntry:
*/
IncrBufferRefCount(entry->buffer);
page = BufferGetPage(entry->buffer);
entrypage = BufferGetPage(entry->buffer);
/*
* Load the first page into memory.
*/
ItemPointerSetMin(&minItem);
entry->list = GinDataLeafPageGetItems(page, &entry->nlist, minItem);
entry->list = GinDataLeafPageGetItems(entrypage, &entry->nlist, minItem);
entry->predictNumberResult = stack->predictNumber * entry->nlist;


@ -6283,14 +6283,14 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
*/
if (ISUPDATE_from_mxstatus(members[i].status))
{
TransactionId xid = members[i].xid;
TransactionId txid = members[i].xid;
Assert(TransactionIdIsValid(xid));
if (TransactionIdPrecedes(xid, relfrozenxid))
Assert(TransactionIdIsValid(txid));
if (TransactionIdPrecedes(txid, relfrozenxid))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg_internal("found update xid %u from before relfrozenxid %u",
xid, relfrozenxid)));
txid, relfrozenxid)));
/*
* It's an update; should we keep it? If the transaction is known
@ -6304,13 +6304,13 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* because of race conditions explained in detail in
* heapam_visibility.c.
*/
if (TransactionIdIsCurrentTransactionId(xid) ||
TransactionIdIsInProgress(xid))
if (TransactionIdIsCurrentTransactionId(txid) ||
TransactionIdIsInProgress(txid))
{
Assert(!TransactionIdIsValid(update_xid));
update_xid = xid;
update_xid = txid;
}
else if (TransactionIdDidCommit(xid))
else if (TransactionIdDidCommit(txid))
{
/*
* The transaction committed, so we can tell caller to set
@ -6319,7 +6319,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
*/
Assert(!TransactionIdIsValid(update_xid));
update_committed = true;
update_xid = xid;
update_xid = txid;
}
else
{


@ -516,23 +516,23 @@ TransactionGroupUpdateXidStatus(TransactionId xid, XidStatus status,
/* Walk the list and update the status of all XIDs. */
while (nextidx != INVALID_PGPROCNO)
{
PGPROC *proc = &ProcGlobal->allProcs[nextidx];
PGPROC *nextproc = &ProcGlobal->allProcs[nextidx];
/*
* Transactions with more than THRESHOLD_SUBTRANS_CLOG_OPT sub-XIDs
* should not use group XID status update mechanism.
*/
Assert(proc->subxidStatus.count <= THRESHOLD_SUBTRANS_CLOG_OPT);
Assert(nextproc->subxidStatus.count <= THRESHOLD_SUBTRANS_CLOG_OPT);
TransactionIdSetPageStatusInternal(proc->clogGroupMemberXid,
proc->subxidStatus.count,
proc->subxids.xids,
proc->clogGroupMemberXidStatus,
proc->clogGroupMemberLsn,
proc->clogGroupMemberPage);
TransactionIdSetPageStatusInternal(nextproc->clogGroupMemberXid,
nextproc->subxidStatus.count,
nextproc->subxids.xids,
nextproc->clogGroupMemberXidStatus,
nextproc->clogGroupMemberLsn,
nextproc->clogGroupMemberPage);
/* Move to next proc in list. */
nextidx = pg_atomic_read_u32(&proc->clogGroupNext);
nextidx = pg_atomic_read_u32(&nextproc->clogGroupNext);
}
/* We're done with the lock now. */
@ -545,18 +545,18 @@ TransactionGroupUpdateXidStatus(TransactionId xid, XidStatus status,
*/
while (wakeidx != INVALID_PGPROCNO)
{
PGPROC *proc = &ProcGlobal->allProcs[wakeidx];
PGPROC *wakeproc = &ProcGlobal->allProcs[wakeidx];
wakeidx = pg_atomic_read_u32(&proc->clogGroupNext);
pg_atomic_write_u32(&proc->clogGroupNext, INVALID_PGPROCNO);
wakeidx = pg_atomic_read_u32(&wakeproc->clogGroupNext);
pg_atomic_write_u32(&wakeproc->clogGroupNext, INVALID_PGPROCNO);
/* ensure all previous writes are visible before follower continues. */
pg_write_barrier();
proc->clogGroupMember = false;
wakeproc->clogGroupMember = false;
if (proc != MyProc)
PGSemaphoreUnlock(proc->sem);
if (wakeproc != MyProc)
PGSemaphoreUnlock(wakeproc->sem);
}
return true;


@ -275,12 +275,12 @@ perform_base_backup(basebackup_options *opt, bbsink *sink)
PG_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(false));
{
ListCell *lc;
tablespaceinfo *ti;
tablespaceinfo *newti;
/* Add a node for the base directory at the end */
ti = palloc0(sizeof(tablespaceinfo));
ti->size = -1;
state.tablespaces = lappend(state.tablespaces, ti);
newti = palloc0(sizeof(tablespaceinfo));
newti->size = -1;
state.tablespaces = lappend(state.tablespaces, newti);
/*
* Calculate the total backup size by summing up the size of each


@ -1818,19 +1818,19 @@ heap_drop_with_catalog(Oid relid)
*/
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
{
Relation rel;
HeapTuple tuple;
Relation ftrel;
HeapTuple fttuple;
rel = table_open(ForeignTableRelationId, RowExclusiveLock);
ftrel = table_open(ForeignTableRelationId, RowExclusiveLock);
tuple = SearchSysCache1(FOREIGNTABLEREL, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
fttuple = SearchSysCache1(FOREIGNTABLEREL, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(fttuple))
elog(ERROR, "cache lookup failed for foreign table %u", relid);
CatalogTupleDelete(rel, &tuple->t_self);
CatalogTupleDelete(ftrel, &fttuple->t_self);
ReleaseSysCache(tuple);
table_close(rel, RowExclusiveLock);
ReleaseSysCache(fttuple);
table_close(ftrel, RowExclusiveLock);
}
/*


@ -1151,10 +1151,8 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
if (argnumbers)
{
/* Re-order the argument types into call's logical order */
int i;
for (i = 0; i < pronargs; i++)
newResult->args[i] = proargtypes[argnumbers[i]];
for (int j = 0; j < pronargs; j++)
newResult->args[j] = proargtypes[argnumbers[j]];
}
else
{
@ -1163,12 +1161,10 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
}
if (variadic)
{
int i;
newResult->nvargs = effective_nargs - pronargs + 1;
/* Expand variadic argument into N copies of element type */
for (i = pronargs - 1; i < effective_nargs; i++)
newResult->args[i] = va_elem_type;
for (int j = pronargs - 1; j < effective_nargs; j++)
newResult->args[j] = va_elem_type;
}
else
newResult->nvargs = 0;


@ -107,7 +107,7 @@ parse_publication_options(ParseState *pstate,
{
char *publish;
List *publish_list;
ListCell *lc;
ListCell *lc2;
if (*publish_given)
errorConflictingDefElem(defel, pstate);
@ -131,9 +131,9 @@ parse_publication_options(ParseState *pstate,
"publish")));
/* Process the option list. */
foreach(lc, publish_list)
foreach(lc2, publish_list)
{
char *publish_opt = (char *) lfirst(lc);
char *publish_opt = (char *) lfirst(lc2);
if (strcmp(publish_opt, "insert") == 0)
pubactions->pubinsert = true;


@ -10223,7 +10223,7 @@ CloneFkReferencing(List **wqueue, Relation parentRel, Relation partRel)
Oid constrOid;
ObjectAddress address,
referenced;
ListCell *cell;
ListCell *lc;
Oid insertTriggerOid,
updateTriggerOid;
@ -10276,9 +10276,9 @@ CloneFkReferencing(List **wqueue, Relation parentRel, Relation partRel)
* don't need to recurse to partitions for this constraint.
*/
attached = false;
foreach(cell, partFKs)
foreach(lc, partFKs)
{
ForeignKeyCacheInfo *fk = lfirst_node(ForeignKeyCacheInfo, cell);
ForeignKeyCacheInfo *fk = lfirst_node(ForeignKeyCacheInfo, lc);
if (tryAttachPartitionForeignKey(fk,
RelationGetRelid(partRel),
@ -10877,7 +10877,7 @@ ATExecAlterConstrRecurse(Constraint *cmdcon, Relation conrel, Relation tgrel,
{
Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tgtuple);
Form_pg_trigger copy_tg;
HeapTuple copyTuple;
HeapTuple tgCopyTuple;
/*
* Remember OIDs of other relation(s) involved in FK constraint.
@ -10901,16 +10901,16 @@ ATExecAlterConstrRecurse(Constraint *cmdcon, Relation conrel, Relation tgrel,
tgform->tgfoid != F_RI_FKEY_CHECK_UPD)
continue;
copyTuple = heap_copytuple(tgtuple);
copy_tg = (Form_pg_trigger) GETSTRUCT(copyTuple);
tgCopyTuple = heap_copytuple(tgtuple);
copy_tg = (Form_pg_trigger) GETSTRUCT(tgCopyTuple);
copy_tg->tgdeferrable = cmdcon->deferrable;
copy_tg->tginitdeferred = cmdcon->initdeferred;
CatalogTupleUpdate(tgrel, &copyTuple->t_self, copyTuple);
CatalogTupleUpdate(tgrel, &tgCopyTuple->t_self, tgCopyTuple);
InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
heap_freetuple(copyTuple);
heap_freetuple(tgCopyTuple);
}
systable_endscan(tgscan);
@ -18083,14 +18083,14 @@ AttachPartitionEnsureIndexes(Relation rel, Relation attachrel)
if (!found)
{
IndexStmt *stmt;
Oid constraintOid;
Oid conOid;
stmt = generateClonedIndexStmt(NULL,
idxRel, attmap,
&constraintOid);
&conOid);
DefineIndex(RelationGetRelid(attachrel), stmt, InvalidOid,
RelationGetRelid(idxRel),
constraintOid,
conOid,
true, false, false, false, false);
}


@ -1694,9 +1694,9 @@ renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
for (int i = 0; i < partdesc->nparts; i++)
{
Oid partitionId = partdesc->oids[i];
Oid partoid = partdesc->oids[i];
renametrig_partition(tgrel, partitionId, tgform->oid, newname,
renametrig_partition(tgrel, partoid, tgform->oid, newname,
NameStr(tgform->tgname));
}
}


@ -3483,8 +3483,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
if (aggnode->aggstrategy == AGG_SORTED)
{
int i = 0;
Assert(aggnode->numCols > 0);
/*
@ -3495,9 +3493,9 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
(ExprState **) palloc0(aggnode->numCols * sizeof(ExprState *));
/* for each grouping set */
for (i = 0; i < phasedata->numsets; i++)
for (int k = 0; k < phasedata->numsets; k++)
{
int length = phasedata->gset_lengths[i];
int length = phasedata->gset_lengths[k];
if (phasedata->eqfunctions[length - 1] != NULL)
continue;
@ -3576,7 +3574,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
{
Plan *outerplan = outerPlan(node);
uint64 totalGroups = 0;
int i;
aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
"HashAgg meta context",
@ -3599,8 +3596,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* when there is more than one grouping set, but should still be
* reasonable.
*/
for (i = 0; i < aggstate->num_hashes; i++)
totalGroups += aggstate->perhash[i].aggnode->numGroups;
for (int k = 0; k < aggstate->num_hashes; k++)
totalGroups += aggstate->perhash[k].aggnode->numGroups;
hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
&aggstate->hash_mem_limit,


@ -2484,35 +2484,35 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
{
RawStmt *parsetree = plansource->raw_parse_tree;
const char *src = plansource->query_string;
List *stmt_list;
List *querytree_list;
/*
* Parameter datatypes are driven by parserSetup hook if provided,
* otherwise we use the fixed parameter list.
*/
if (parsetree == NULL)
stmt_list = NIL;
querytree_list = NIL;
else if (plan->parserSetup != NULL)
{
Assert(plan->nargs == 0);
stmt_list = pg_analyze_and_rewrite_withcb(parsetree,
src,
plan->parserSetup,
plan->parserSetupArg,
_SPI_current->queryEnv);
querytree_list = pg_analyze_and_rewrite_withcb(parsetree,
src,
plan->parserSetup,
plan->parserSetupArg,
_SPI_current->queryEnv);
}
else
{
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
src,
plan->argtypes,
plan->nargs,
_SPI_current->queryEnv);
querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree,
src,
plan->argtypes,
plan->nargs,
_SPI_current->queryEnv);
}
/* Finish filling in the CachedPlanSource */
CompleteCachedPlan(plansource,
stmt_list,
querytree_list,
NULL,
plan->argtypes,
plan->nargs,


@ -2217,13 +2217,13 @@ cost_append(AppendPath *apath)
if (pathkeys == NIL)
{
Path *subpath = (Path *) linitial(apath->subpaths);
Path *firstsubpath = (Path *) linitial(apath->subpaths);
/*
* For an unordered, non-parallel-aware Append we take the startup
* cost as the startup cost of the first subpath.
*/
apath->path.startup_cost = subpath->startup_cost;
apath->path.startup_cost = firstsubpath->startup_cost;
/* Compute rows and costs as sums of subplan rows and costs. */
foreach(l, apath->subpaths)


@ -1303,11 +1303,11 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
}
else
{
RestrictInfo *rinfo = castNode(RestrictInfo, orarg);
RestrictInfo *ri = castNode(RestrictInfo, orarg);
List *orargs;
Assert(!restriction_is_or_clause(rinfo));
orargs = list_make1(rinfo);
Assert(!restriction_is_or_clause(ri));
orargs = list_make1(ri);
indlist = build_paths_for_OR(root, rel,
orargs,


@ -305,10 +305,10 @@ TidQualFromRestrictInfoList(PlannerInfo *root, List *rlist, RelOptInfo *rel)
}
else
{
RestrictInfo *rinfo = castNode(RestrictInfo, orarg);
RestrictInfo *ri = castNode(RestrictInfo, orarg);
Assert(!restriction_is_or_clause(rinfo));
sublist = TidQualFromRestrictInfo(root, rinfo, rel);
Assert(!restriction_is_or_clause(ri));
sublist = TidQualFromRestrictInfo(root, ri, rel);
}
/*


@ -3449,7 +3449,6 @@ get_number_of_groups(PlannerInfo *root,
{
/* Add up the estimates for each grouping set */
ListCell *lc;
ListCell *lc2;
Assert(gd); /* keep Coverity happy */
@ -3458,17 +3457,18 @@ get_number_of_groups(PlannerInfo *root,
foreach(lc, gd->rollups)
{
RollupData *rollup = lfirst_node(RollupData, lc);
ListCell *lc;
ListCell *lc2;
ListCell *lc3;
groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
target_list);
rollup->numGroups = 0.0;
forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
{
List *gset = (List *) lfirst(lc);
GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
List *gset = (List *) lfirst(lc2);
GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
double numGroups = estimate_num_groups(root,
groupExprs,
path_rows,
@ -3484,6 +3484,8 @@ get_number_of_groups(PlannerInfo *root,
if (gd->hash_sets_idx)
{
ListCell *lc2;
gd->dNumHashGroups = 0;
groupExprs = get_sortgrouplist_exprs(parse->groupClause,


@ -658,9 +658,10 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
/* Find the highest number of workers requested for any subpath. */
foreach(lc, partial_pathlist)
{
Path *path = lfirst(lc);
Path *subpath = lfirst(lc);
parallel_workers = Max(parallel_workers, path->parallel_workers);
parallel_workers = Max(parallel_workers,
subpath->parallel_workers);
}
Assert(parallel_workers > 0);


@ -4463,16 +4463,16 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
if (!isNull)
{
Node *n;
List *querytree_list;
List *query_list;
n = stringToNode(TextDatumGetCString(tmp));
if (IsA(n, List))
querytree_list = linitial_node(List, castNode(List, n));
query_list = linitial_node(List, castNode(List, n));
else
querytree_list = list_make1(n);
if (list_length(querytree_list) != 1)
query_list = list_make1(n);
if (list_length(query_list) != 1)
goto fail;
querytree = linitial(querytree_list);
querytree = linitial(query_list);
/*
* Because we'll insist below that the querytree have an empty rtable


@ -437,16 +437,16 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
{
Var *var = (Var *) pitem->item;
NestLoopParam *nlp;
ListCell *lc;
ListCell *lc2;
/* If not from a nestloop outer rel, complain */
if (!bms_is_member(var->varno, root->curOuterRels))
elog(ERROR, "non-LATERAL parameter required by subquery");
/* Is this param already listed in root->curOuterParams? */
foreach(lc, root->curOuterParams)
foreach(lc2, root->curOuterParams)
{
nlp = (NestLoopParam *) lfirst(lc);
nlp = (NestLoopParam *) lfirst(lc2);
if (nlp->paramno == pitem->paramId)
{
Assert(equal(var, nlp->paramval));
@ -454,7 +454,7 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
break;
}
}
if (lc == NULL)
if (lc2 == NULL)
{
/* No, so add it */
nlp = makeNode(NestLoopParam);
@ -467,7 +467,7 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
{
PlaceHolderVar *phv = (PlaceHolderVar *) pitem->item;
NestLoopParam *nlp;
ListCell *lc;
ListCell *lc2;
/* If not from a nestloop outer rel, complain */
if (!bms_is_subset(find_placeholder_info(root, phv)->ph_eval_at,
@ -475,9 +475,9 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
elog(ERROR, "non-LATERAL parameter required by subquery");
/* Is this param already listed in root->curOuterParams? */
foreach(lc, root->curOuterParams)
foreach(lc2, root->curOuterParams)
{
nlp = (NestLoopParam *) lfirst(lc);
nlp = (NestLoopParam *) lfirst(lc2);
if (nlp->paramno == pitem->paramId)
{
Assert(equal(phv, nlp->paramval));
@ -485,7 +485,7 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
break;
}
}
if (lc == NULL)
if (lc2 == NULL)
{
/* No, so add it */
nlp = makeNode(NestLoopParam);


@ -539,11 +539,11 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
!fc->func_variadic &&
coldeflist == NIL)
{
ListCell *lc;
ListCell *lc2;
foreach(lc, fc->args)
foreach(lc2, fc->args)
{
Node *arg = (Node *) lfirst(lc);
Node *arg = (Node *) lfirst(lc2);
FuncCall *newfc;
last_srf = pstate->p_last_srf;


@ -4321,11 +4321,11 @@ get_qual_for_range(Relation parent, PartitionBoundSpec *spec,
PartitionDesc pdesc = RelationGetPartitionDesc(parent, false);
Oid *inhoids = pdesc->oids;
int nparts = pdesc->nparts,
i;
k;
for (i = 0; i < nparts; i++)
for (k = 0; k < nparts; k++)
{
Oid inhrelid = inhoids[i];
Oid inhrelid = inhoids[k];
HeapTuple tuple;
Datum datum;
bool isnull;


@ -2289,11 +2289,10 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
elem_clauses = NIL;
foreach(lc1, elem_exprs)
{
Expr *rightop = (Expr *) lfirst(lc1),
*elem_clause;
Expr *elem_clause;
elem_clause = make_opclause(saop_op, BOOLOID, false,
leftop, rightop,
leftop, lfirst(lc1),
InvalidOid, saop_coll);
elem_clauses = lappend(elem_clauses, elem_clause);
}


@ -2320,17 +2320,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
for (i = 0; i < nrelids; i++)
{
Oid relid = change->data.truncate.relids[i];
Relation relation;
Relation rel;
relation = RelationIdGetRelation(relid);
rel = RelationIdGetRelation(relid);
if (!RelationIsValid(relation))
if (!RelationIsValid(rel))
elog(ERROR, "could not open relation with OID %u", relid);
if (!RelationIsLogicallyLogged(relation))
if (!RelationIsLogicallyLogged(rel))
continue;
relations[nrelations++] = relation;
relations[nrelations++] = rel;
}
/* Apply the truncate. */


@ -180,7 +180,7 @@ WalReceiverMain(void)
bool first_stream;
WalRcvData *walrcv = WalRcv;
TimestampTz last_recv_timestamp;
TimestampTz now;
TimestampTz starttime;
bool ping_sent;
char *err;
char *sender_host = NULL;
@ -192,7 +192,7 @@ WalReceiverMain(void)
*/
Assert(walrcv != NULL);
now = GetCurrentTimestamp();
starttime = GetCurrentTimestamp();
/*
* Mark walreceiver as running in shared memory.
@ -248,7 +248,7 @@ WalReceiverMain(void)
/* Initialise to a sanish value */
walrcv->lastMsgSendTime =
walrcv->lastMsgReceiptTime = walrcv->latestWalEndTime = now;
walrcv->lastMsgReceiptTime = walrcv->latestWalEndTime = starttime;
/* Report the latch to use to awaken this process */
walrcv->latch = &MyProc->procLatch;


@ -1692,7 +1692,6 @@ dependencies_clauselist_selectivity(PlannerInfo *root,
{
int idx;
Node *expr;
int k;
AttrNumber unique_attnum = InvalidAttrNumber;
AttrNumber attnum;
@ -1740,15 +1739,15 @@ dependencies_clauselist_selectivity(PlannerInfo *root,
expr = (Node *) list_nth(stat->exprs, idx);
/* try to find the expression in the unique list */
for (k = 0; k < unique_exprs_cnt; k++)
for (int m = 0; m < unique_exprs_cnt; m++)
{
/*
* found a matching unique expression, use the attnum
* (derived from index of the unique expression)
*/
if (equal(unique_exprs[k], expr))
if (equal(unique_exprs[m], expr))
{
unique_attnum = -(k + 1) + attnum_offset;
unique_attnum = -(m + 1) + attnum_offset;
break;
}
}


@ -3922,7 +3922,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
SHM_QUEUE *procLocks;
PROCLOCK *proclock;
PROC_QUEUE *waitQueue;
PGPROC *proc;
PGPROC *queued_proc;
int queue_size;
int i;
@ -3989,13 +3989,13 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
}
/* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
proc = (PGPROC *) waitQueue->links.next;
queued_proc = (PGPROC *) waitQueue->links.next;
for (i = 0; i < queue_size; i++)
{
if (proc == blocked_proc)
if (queued_proc == blocked_proc)
break;
data->waiter_pids[data->npids++] = proc->pid;
proc = (PGPROC *) proc->links.next;
data->waiter_pids[data->npids++] = queued_proc->pid;
queued_proc = (PGPROC *) queued_proc->links.next;
}
bproc->num_locks = data->nlocks - bproc->first_lock;


@ -1450,7 +1450,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
int usecs;
long msecs;
SHM_QUEUE *procLocks;
PROCLOCK *proclock;
PROCLOCK *curproclock;
bool first_holder = true,
first_waiter = true;
int lockHoldersNum = 0;
@ -1480,44 +1480,45 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
LWLockAcquire(partitionLock, LW_SHARED);
procLocks = &(lock->procLocks);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, lockLink));
curproclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, lockLink));
while (proclock)
while (curproclock)
{
/*
* we are a waiter if myProc->waitProcLock == proclock; we are
* a holder if it is NULL or something different
* we are a waiter if myProc->waitProcLock == curproclock; we
* are a holder if it is NULL or something different
*/
if (proclock->tag.myProc->waitProcLock == proclock)
if (curproclock->tag.myProc->waitProcLock == curproclock)
{
if (first_waiter)
{
appendStringInfo(&lock_waiters_sbuf, "%d",
proclock->tag.myProc->pid);
curproclock->tag.myProc->pid);
first_waiter = false;
}
else
appendStringInfo(&lock_waiters_sbuf, ", %d",
proclock->tag.myProc->pid);
curproclock->tag.myProc->pid);
}
else
{
if (first_holder)
{
appendStringInfo(&lock_holders_sbuf, "%d",
proclock->tag.myProc->pid);
curproclock->tag.myProc->pid);
first_holder = false;
}
else
appendStringInfo(&lock_holders_sbuf, ", %d",
proclock->tag.myProc->pid);
curproclock->tag.myProc->pid);
lockHoldersNum++;
}
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
offsetof(PROCLOCK, lockLink));
curproclock = (PROCLOCK *) SHMQueueNext(procLocks,
&curproclock->lockLink,
offsetof(PROCLOCK, lockLink));
}
LWLockRelease(partitionLock);


@ -405,12 +405,12 @@ compute_tsvector_stats(VacAttrStats *stats,
*/
for (i = 0; i < num_mcelem; i++)
{
TrackItem *item = sort_table[i];
TrackItem *titem = sort_table[i];
mcelem_values[i] =
PointerGetDatum(cstring_to_text_with_len(item->key.lexeme,
item->key.length));
mcelem_freqs[i] = (double) item->frequency / (double) nonnull_cnt;
PointerGetDatum(cstring_to_text_with_len(titem->key.lexeme,
titem->key.length));
mcelem_freqs[i] = (double) titem->frequency / (double) nonnull_cnt;
}
mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
mcelem_freqs[i] = (double) maxfreq / (double) nonnull_cnt;


@ -541,12 +541,12 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
*/
for (i = 0; i < num_mcelem; i++)
{
TrackItem *item = sort_table[i];
TrackItem *titem = sort_table[i];
mcelem_values[i] = datumCopy(item->key,
mcelem_values[i] = datumCopy(titem->key,
extra_data->typbyval,
extra_data->typlen);
mcelem_freqs[i] = (double) item->frequency /
mcelem_freqs[i] = (double) titem->frequency /
(double) nonnull_cnt;
}
mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;


@ -1019,17 +1019,17 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (ptype == DTK_JULIAN)
{
char *cp;
int val;
int jday;
if (tzp == NULL)
return DTERR_BAD_FORMAT;
errno = 0;
val = strtoint(field[i], &cp, 10);
if (errno == ERANGE || val < 0)
jday = strtoint(field[i], &cp, 10);
if (errno == ERANGE || jday < 0)
return DTERR_FIELD_OVERFLOW;
j2date(val, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
j2date(jday, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
isjulian = true;
/* Get the time zone from the end of the string */
@ -1181,10 +1181,10 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (ptype != 0)
{
char *cp;
int val;
int value;
errno = 0;
val = strtoint(field[i], &cp, 10);
value = strtoint(field[i], &cp, 10);
if (errno == ERANGE)
return DTERR_FIELD_OVERFLOW;
@ -1209,7 +1209,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
switch (ptype)
{
case DTK_YEAR:
tm->tm_year = val;
tm->tm_year = value;
tmask = DTK_M(YEAR);
break;
@ -1222,33 +1222,33 @@ DecodeDateTime(char **field, int *ftype, int nf,
if ((fmask & DTK_M(MONTH)) != 0 &&
(fmask & DTK_M(HOUR)) != 0)
{
tm->tm_min = val;
tm->tm_min = value;
tmask = DTK_M(MINUTE);
}
else
{
tm->tm_mon = val;
tm->tm_mon = value;
tmask = DTK_M(MONTH);
}
break;
case DTK_DAY:
tm->tm_mday = val;
tm->tm_mday = value;
tmask = DTK_M(DAY);
break;
case DTK_HOUR:
tm->tm_hour = val;
tm->tm_hour = value;
tmask = DTK_M(HOUR);
break;
case DTK_MINUTE:
tm->tm_min = val;
tm->tm_min = value;
tmask = DTK_M(MINUTE);
break;
case DTK_SECOND:
tm->tm_sec = val;
tm->tm_sec = value;
tmask = DTK_M(SECOND);
if (*cp == '.')
{
@ -1268,10 +1268,10 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTK_JULIAN:
/* previous field was a label for "julian date" */
if (val < 0)
if (value < 0)
return DTERR_FIELD_OVERFLOW;
tmask = DTK_DATE_M;
j2date(val, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
j2date(value, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
isjulian = true;
/* fractional Julian Day? */
@ -2066,7 +2066,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
if (ptype != 0)
{
char *cp;
int val;
int value;
/* Only accept a date under limited circumstances */
switch (ptype)
@ -2082,7 +2082,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
}
errno = 0;
val = strtoint(field[i], &cp, 10);
value = strtoint(field[i], &cp, 10);
if (errno == ERANGE)
return DTERR_FIELD_OVERFLOW;
@ -2107,7 +2107,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
switch (ptype)
{
case DTK_YEAR:
tm->tm_year = val;
tm->tm_year = value;
tmask = DTK_M(YEAR);
break;
@ -2120,33 +2120,33 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
if ((fmask & DTK_M(MONTH)) != 0 &&
(fmask & DTK_M(HOUR)) != 0)
{
tm->tm_min = val;
tm->tm_min = value;
tmask = DTK_M(MINUTE);
}
else
{
tm->tm_mon = val;
tm->tm_mon = value;
tmask = DTK_M(MONTH);
}
break;
case DTK_DAY:
tm->tm_mday = val;
tm->tm_mday = value;
tmask = DTK_M(DAY);
break;
case DTK_HOUR:
tm->tm_hour = val;
tm->tm_hour = value;
tmask = DTK_M(HOUR);
break;
case DTK_MINUTE:
tm->tm_min = val;
tm->tm_min = value;
tmask = DTK_M(MINUTE);
break;
case DTK_SECOND:
tm->tm_sec = val;
tm->tm_sec = value;
tmask = DTK_M(SECOND);
if (*cp == '.')
{
@ -2166,10 +2166,10 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTK_JULIAN:
/* previous field was a label for "julian date" */
if (val < 0)
if (value < 0)
return DTERR_FIELD_OVERFLOW;
tmask = DTK_DATE_M;
j2date(val, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
j2date(value, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
isjulian = true;
if (*cp == '.')


@ -448,10 +448,10 @@ pg_ulltoa_n(uint64 value, char *a)
while (value >= 100000000)
{
const uint64 q = value / 100000000;
uint32 value2 = (uint32) (value - 100000000 * q);
uint32 value3 = (uint32) (value - 100000000 * q);
const uint32 c = value2 % 10000;
const uint32 d = value2 / 10000;
const uint32 c = value3 % 10000;
const uint32 d = value3 / 10000;
const uint32 c0 = (c % 100) << 1;
const uint32 c1 = (c / 100) << 1;
const uint32 d0 = (d % 100) << 1;


@ -238,9 +238,9 @@ pg_partition_ancestors(PG_FUNCTION_ARGS)
if (funcctx->call_cntr < list_length(ancestors))
{
Oid relid = list_nth_oid(ancestors, funcctx->call_cntr);
Oid resultrel = list_nth_oid(ancestors, funcctx->call_cntr);
SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(relid));
SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(resultrel));
}
SRF_RETURN_DONE(funcctx);


@ -8099,9 +8099,9 @@ get_parameter(Param *param, deparse_context *context)
*/
foreach(lc, context->namespaces)
{
deparse_namespace *dpns = lfirst(lc);
deparse_namespace *depns = lfirst(lc);
if (dpns->rtable_names != NIL)
if (depns->rtable_names != NIL)
{
should_qualify = true;
break;


@ -3552,27 +3552,27 @@ do_connect(enum trivalue reuse_previous_specification,
param_is_newly_set(PQhost(o_conn), PQhost(pset.db)) ||
param_is_newly_set(PQport(o_conn), PQport(pset.db)))
{
char *host = PQhost(pset.db);
char *connhost = PQhost(pset.db);
char *hostaddr = PQhostaddr(pset.db);
if (is_unixsock_path(host))
if (is_unixsock_path(connhost))
{
/* hostaddr overrides host */
/* hostaddr overrides connhost */
if (hostaddr && *hostaddr)
printf(_("You are now connected to database \"%s\" as user \"%s\" on address \"%s\" at port \"%s\".\n"),
PQdb(pset.db), PQuser(pset.db), hostaddr, PQport(pset.db));
else
printf(_("You are now connected to database \"%s\" as user \"%s\" via socket in \"%s\" at port \"%s\".\n"),
PQdb(pset.db), PQuser(pset.db), host, PQport(pset.db));
PQdb(pset.db), PQuser(pset.db), connhost, PQport(pset.db));
}
else
{
if (hostaddr && *hostaddr && strcmp(host, hostaddr) != 0)
if (hostaddr && *hostaddr && strcmp(connhost, hostaddr) != 0)
printf(_("You are now connected to database \"%s\" as user \"%s\" on host \"%s\" (address \"%s\") at port \"%s\".\n"),
PQdb(pset.db), PQuser(pset.db), host, hostaddr, PQport(pset.db));
PQdb(pset.db), PQuser(pset.db), connhost, hostaddr, PQport(pset.db));
else
printf(_("You are now connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n"),
PQdb(pset.db), PQuser(pset.db), host, PQport(pset.db));
PQdb(pset.db), PQuser(pset.db), connhost, PQport(pset.db));
}
}
else


@ -546,13 +546,13 @@ SH_GROW(SH_TYPE * tb, uint64 newsize)
if (oldentry->status == SH_STATUS_IN_USE)
{
uint32 hash;
uint32 startelem;
uint32 startelem2;
uint32 curelem;
SH_ELEMENT_TYPE *newentry;
hash = SH_ENTRY_HASH(tb, oldentry);
startelem = SH_INITIAL_BUCKET(tb, hash);
curelem = startelem;
startelem2 = SH_INITIAL_BUCKET(tb, hash);
curelem = startelem2;
/* find empty element to put data into */
while (true)
@ -564,7 +564,7 @@ SH_GROW(SH_TYPE * tb, uint64 newsize)
break;
}
curelem = SH_NEXT(tb, curelem, startelem);
curelem = SH_NEXT(tb, curelem, startelem2);
}
/* copy entry to new slot */


@ -367,10 +367,10 @@ ecpg_store_result(const PGresult *results, int act_field,
/* check strlen for each tuple */
for (act_tuple = 0; act_tuple < ntuples; act_tuple++)
{
int len = strlen(PQgetvalue(results, act_tuple, act_field)) + 1;
int slen = strlen(PQgetvalue(results, act_tuple, act_field)) + 1;
if (len > var->varcharsize)
var->varcharsize = len;
if (slen > var->varcharsize)
var->varcharsize = slen;
}
var->offset *= var->varcharsize;
len = var->offset * ntuples;


@ -558,7 +558,7 @@ ECPGset_var(int number, void *pointer, int lineno)
ptr = (struct var_list *) calloc(1L, sizeof(struct var_list));
if (!ptr)
{
struct sqlca_t *sqlca = ECPGget_sqlca();
sqlca = ECPGget_sqlca();
if (sqlca == NULL)
{


@ -1820,16 +1820,16 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (ptype == DTK_JULIAN)
{
char *cp;
int val;
int jday;
if (tzp == NULL)
return -1;
val = strtoint(field[i], &cp, 10);
jday = strtoint(field[i], &cp, 10);
if (*cp != '-')
return -1;
j2date(val, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
j2date(jday, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
/* Get the time zone from the end of the string */
if (DecodeTimezone(cp, tzp) != 0)
return -1;
@ -1958,9 +1958,9 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (ptype != 0)
{
char *cp;
int val;
int value;
val = strtoint(field[i], &cp, 10);
value = strtoint(field[i], &cp, 10);
/*
* only a few kinds are allowed to have an embedded
@ -1983,7 +1983,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
switch (ptype)
{
case DTK_YEAR:
tm->tm_year = val;
tm->tm_year = value;
tmask = DTK_M(YEAR);
break;
@ -1996,33 +1996,33 @@ DecodeDateTime(char **field, int *ftype, int nf,
if ((fmask & DTK_M(MONTH)) != 0 &&
(fmask & DTK_M(HOUR)) != 0)
{
tm->tm_min = val;
tm->tm_min = value;
tmask = DTK_M(MINUTE);
}
else
{
tm->tm_mon = val;
tm->tm_mon = value;
tmask = DTK_M(MONTH);
}
break;
case DTK_DAY:
tm->tm_mday = val;
tm->tm_mday = value;
tmask = DTK_M(DAY);
break;
case DTK_HOUR:
tm->tm_hour = val;
tm->tm_hour = value;
tmask = DTK_M(HOUR);
break;
case DTK_MINUTE:
tm->tm_min = val;
tm->tm_min = value;
tmask = DTK_M(MINUTE);
break;
case DTK_SECOND:
tm->tm_sec = val;
tm->tm_sec = value;
tmask = DTK_M(SECOND);
if (*cp == '.')
{
@ -2046,7 +2046,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
* previous field was a label for "julian date"?
***/
tmask = DTK_DATE_M;
j2date(val, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
j2date(value, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
/* fractional Julian Day? */
if (*cp == '.')
{


@ -1647,13 +1647,12 @@ plpgsql_dumptree(PLpgSQL_function *func)
case PLPGSQL_DTYPE_ROW:
{
PLpgSQL_row *row = (PLpgSQL_row *) d;
int i;
printf("ROW %-16s fields", row->refname);
for (i = 0; i < row->nfields; i++)
for (int j = 0; j < row->nfields; j++)
{
printf(" %s=var %d", row->fieldnames[i],
row->varnos[i]);
printf(" %s=var %d", row->fieldnames[j],
row->varnos[j]);
}
printf("\n");
}