pgindent run for 9.5

Bruce Momjian 2015-05-23 21:35:49 -04:00
parent 225892552b
commit 807b9e0dff
414 changed files with 5810 additions and 5308 deletions


@ -209,6 +209,7 @@ leftmostvalue_int2(void)
{
return Int16GetDatum(SHRT_MIN);
}
GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
static Datum
@ -216,6 +217,7 @@ leftmostvalue_int4(void)
{
return Int32GetDatum(INT_MIN);
}
GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
static Datum
@ -226,6 +228,7 @@ leftmostvalue_int8(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
static Datum
@ -233,6 +236,7 @@ leftmostvalue_float4(void)
{
return Float4GetDatum(-get_float4_infinity());
}
GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
static Datum
@ -240,6 +244,7 @@ leftmostvalue_float8(void)
{
return Float8GetDatum(-get_float8_infinity());
}
GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
static Datum
@ -250,6 +255,7 @@ leftmostvalue_money(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
static Datum
@ -257,6 +263,7 @@ leftmostvalue_oid(void)
{
return ObjectIdGetDatum(0);
}
GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
static Datum
@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
{
return TimestampGetDatum(DT_NOBEGIN);
}
GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
@ -273,6 +281,7 @@ leftmostvalue_time(void)
{
return TimeADTGetDatum(0);
}
GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
static Datum
@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
return TimeTzADTPGetDatum(v);
}
GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
static Datum
@ -292,6 +302,7 @@ leftmostvalue_date(void)
{
return DateADTGetDatum(DATEVAL_NOBEGIN);
}
GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
static Datum
@ -304,6 +315,7 @@ leftmostvalue_interval(void)
v->month = 0;
return IntervalPGetDatum(v);
}
GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
static Datum
@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
return MacaddrPGetDatum(v);
}
GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
static Datum
@ -320,6 +333,7 @@ leftmostvalue_inet(void)
{
return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
}
GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
@ -329,6 +343,7 @@ leftmostvalue_text(void)
{
return PointerGetDatum(cstring_to_text_with_len("", 0));
}
GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
static Datum
@ -336,6 +351,7 @@ leftmostvalue_char(void)
{
return CharGetDatum(SCHAR_MIN);
}
GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
@ -348,6 +364,7 @@ leftmostvalue_bit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
static Datum
@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
/*
@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
{
return PointerGetDatum(NULL);
}
GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
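
The pattern above is uniform: each indexable type supplies a function returning the smallest value in its ordering, and GIN_SUPPORT pairs it with the type's btree comparison routine to generate the GIN support functions. The shape of that pairing can be sketched outside the server like this (a standalone illustration only; double stands in for Datum, and every name here is hypothetical):

#include <limits.h>
#include <math.h>
#include <stdio.h>

/* Hypothetical stand-in for one (leftmost value, comparator) opclass pair. */
typedef struct
{
    const char *typname;
    double (*leftmostvalue) (void);
    int (*cmp) (double a, double b);
} opclass_entry;

static double leftmostvalue_int2(void)   { return (double) SHRT_MIN; }
static double leftmostvalue_float8(void) { return -INFINITY; }

static int btcmp(double a, double b) { return (a > b) - (a < b); }

static const opclass_entry entries[] = {
    {"int2", leftmostvalue_int2, btcmp},        /* cf. GIN_SUPPORT(int2, ...) */
    {"float8", leftmostvalue_float8, btcmp},    /* cf. GIN_SUPPORT(float8, ...) */
};

int main(void)
{
    /* Any value of the type compares greater than or equal to the leftmost. */
    for (int i = 0; i < 2; i++)
        printf("%s: leftmost = %g, cmp(leftmost, 0) = %d\n",
               entries[i].typname, entries[i].leftmostvalue(),
               entries[i].cmp(entries[i].leftmostvalue(), 0.0));
    return 0;
}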


@ -33,6 +33,7 @@ ltree_crc32_sz(char *buf, int size)
while (size > 0)
{
char c = (char) TOLOWER(*p);
COMP_TRADITIONAL_CRC32(crc, &c, 1);
size--;
p++;
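
For context: COMP_TRADITIONAL_CRC32 is PostgreSQL's table-driven CRC macro, and the loop feeds it one lower-cased byte at a time so that ltree label hashing is case-insensitive. A standalone sketch of the equivalent effect, using a plain bitwise CRC-32 in place of the server's lookup table:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-at-a-time reflected CRC-32 (polynomial 0xEDB88320). */
static uint32_t crc32_byte(uint32_t crc, unsigned char c)
{
    crc ^= c;
    for (int k = 0; k < 8; k++)
        crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    return crc;
}

int main(void)
{
    const char *buf = "Some.Label.Path";    /* illustrative input */
    uint32_t crc = 0xFFFFFFFFu;

    /* Lower-case each byte before hashing, as the loop above does with
     * TOLOWER + COMP_TRADITIONAL_CRC32. */
    for (size_t i = 0; i < strlen(buf); i++)
        crc = crc32_byte(crc, (unsigned char) tolower((unsigned char) buf[i]));

    printf("crc = 0x%08x\n", crc ^ 0xFFFFFFFFu);
    return 0;
}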


@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
}
/*
* If we're beyond the end of the page, set flag to end the function
* in the following iteration.
*/
if (state->offset > PageGetMaxOffsetNumber(state->page))
state->done = true;


@ -309,7 +309,8 @@ stack_free(void *stackFree)
substatementTotal = 0;
/*
* Reset statement logged so that next statement will be
* logged.
*/
statementLogged = false;
}
@ -505,10 +506,11 @@ log_audit_event(AuditEventStackItem *stackItem)
className = CLASS_ROLE;
class = LOG_ROLE;
break;
/*
* Rename and Drop are general and therefore we have to do
* an additional check against the command string to see
* if they are role or regular DDL.
*/
case T_RenameStmt:
case T_DropStmt:
@ -558,8 +560,8 @@ log_audit_event(AuditEventStackItem *stackItem)
/*
* Only log the statement if:
*
* 1. If object was selected for audit logging (granted) 2. The statement
* belongs to a class that is being logged
*
* If neither of these is true, return.
*/
@ -912,8 +914,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
found = true;
/*
* If we are not logging all-catalog queries (auditLogCatalog is
* false) then filter out any system relations here.
*/
relOid = rte->relid;
rel = relation_open(relOid, NoLock);
@ -983,46 +985,55 @@ log_select_dml(Oid auditOid, List *rangeTabls)
case RELKIND_RELATION:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_TABLE;
break;
case RELKIND_INDEX:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_INDEX;
break;
case RELKIND_SEQUENCE:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_SEQUENCE;
break;
case RELKIND_TOASTVALUE:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_TOASTVALUE;
break;
case RELKIND_VIEW:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_VIEW;
break;
case RELKIND_COMPOSITE_TYPE:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_COMPOSITE_TYPE;
break;
case RELKIND_FOREIGN_TABLE:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_FOREIGN_TABLE;
break;
case RELKIND_MATVIEW:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_MATVIEW;
break;
default:
auditEventStack->auditEvent.objectType =
OBJECT_TYPE_UNKNOWN;
break;
}
@ -1104,8 +1115,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
/*
* If no tables were found that means that RangeTbls was empty or all
* relations were in the system schema. In that case still log a session
* record.
*/
if (!found)
{
@ -1159,6 +1170,7 @@ log_function_execute(Oid objectId)
stackItem->auditEvent.commandTag = T_DoStmt;
stackItem->auditEvent.command = COMMAND_EXECUTE;
stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
log_audit_event(stackItem);
@ -1236,9 +1248,9 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
standard_ExecutorStart(queryDesc, eflags);
/*
* Move the stack memory context to the query memory context. This needs
* to be done here because the query context does not exist before the
* call to standard_ExecutorStart() but the stack item is required by
* pg_audit_ExecutorCheckPerms_hook() which is called during
* standard_ExecutorStart().
*/
@ -1328,19 +1340,22 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
params, dest, completionTag);
/*
* Process the audit event if there is one. Also check that this event
* was not popped off the stack by a memory context being free'd
* elsewhere.
*/
if (stackItem && !IsAbortedTransactionBlockState())
{
/*
* Make sure the item we want to log is still on the stack - if not
* then something has gone wrong and an error will be raised.
*/
stack_valid(stackId);
/*
* Log the utility command if logging is on, the command has not
* already been logged by another hook, and the transaction is not
* aborted.
*/
if (auditLogBitmap != 0 && !stackItem->auditEvent.logged)
log_audit_event(stackItem);
@ -1380,7 +1395,8 @@ Datum
pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
{
EventTriggerData *eventData;
int result,
row;
TupleDesc spiTupDesc;
const char *query;
MemoryContext contextQuery;
@ -1448,6 +1464,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
/* Supply object name and type for audit event */
auditEventStack->auditEvent.objectType =
SPI_getvalue(spiTuple, spiTupDesc, 1);
auditEventStack->auditEvent.objectName =
SPI_getvalue(spiTuple, spiTupDesc, 2);
@ -1473,7 +1490,8 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
Datum
pg_audit_sql_drop(PG_FUNCTION_ARGS)
{
int result,
row;
TupleDesc spiTupDesc;
const char *query;
MemoryContext contextQuery;
@ -1532,6 +1550,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
auditEventStack->auditEvent.objectType =
SPI_getvalue(spiTuple, spiTupDesc, 1);
auditEventStack->auditEvent.objectName =
SPI_getvalue(spiTuple, spiTupDesc, 2);
@ -1847,8 +1866,8 @@ _PG_init(void)
NULL, NULL, NULL);
/*
* Install our hook functions after saving the existing pointers to
* preserve the chains.
*/
next_ExecutorStart_hook = ExecutorStart_hook;
ExecutorStart_hook = pg_audit_ExecutorStart_hook;


@ -34,6 +34,7 @@ typedef struct
bool isvalid;
bool isdirty;
uint16 usagecount;
/*
* An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
* being pinned by too many backends and each backend will only pin once


@ -1231,8 +1231,8 @@ pgss_store(const char *query, uint32 queryId,
else
{
/*
* Welford's method for accurately computing variance. See
* <http://www.johndcook.com/blog/standard_deviation/>
*/
double old_mean = e->counters.mean_time;
@ -1572,10 +1572,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
values[i++] = Float8GetDatumFast(tmp.min_time);
values[i++] = Float8GetDatumFast(tmp.max_time);
values[i++] = Float8GetDatumFast(tmp.mean_time);
/*
* Note we are calculating the population variance here, not the
* sample variance, as we have data for the whole population, so
* Bessel's correction is not used, and we don't divide by
* tmp.calls - 1.
*/
if (tmp.calls > 1)
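
Taken together, the two comments describe Welford's single-pass update followed by a population-variance finalization (divide by n, no Bessel's correction). A minimal standalone sketch of that arithmetic, with made-up timings rather than the pg_stat_statements counters:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double times[] = {10.0, 12.5, 9.8, 11.2, 10.7};     /* illustrative data */
    int n = sizeof(times) / sizeof(times[0]);
    double mean = 0.0;
    double sum_var = 0.0;       /* running sum of squared deviations */

    for (int i = 0; i < n; i++)
    {
        /* Welford's update: numerically stable, one pass over the data. */
        double old_mean = mean;

        mean += (times[i] - old_mean) / (i + 1);
        sum_var += (times[i] - old_mean) * (times[i] - mean);
    }

    /* Population variance: divide by n, not n - 1, because the counters
     * cover every call rather than a sample of calls. */
    printf("mean = %g, stddev = %g\n",
           mean, n > 1 ? sqrt(sum_var / n) : 0.0);
    return 0;
}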


@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
res = pgp_set_convert_crlf(ctx, atoi(val));
else if (strcmp(key, "unicode-mode") == 0)
res = pgp_set_unicode_mode(ctx, atoi(val));
/*
* The remaining options are for debugging/testing and are therefore not
* documented in the user-facing docs.


@ -84,8 +84,8 @@ statapprox_heap(Relation rel, output_type *stat)
CHECK_FOR_INTERRUPTS();
/*
* If the page has only visible tuples, then we can find out the free
* space from the FSM and move on.
*/
if (visibilitymap_test(rel, blkno, &vmbuffer))
{
@ -103,8 +103,8 @@ statapprox_heap(Relation rel, output_type *stat)
page = BufferGetPage(buf);
/*
* It's not safe to call PageGetHeapFreeSpace() on new pages, so we
* treat them as being free space for our purposes.
*/
if (!PageIsNew(page))
stat->free_space += PageGetHeapFreeSpace(page);
@ -120,9 +120,9 @@ statapprox_heap(Relation rel, output_type *stat)
scanned++;
/*
* Look at each tuple on the page and decide whether it's live or
* dead, then count it and its size. Unlike lazy_scan_heap, we can
* afford to ignore problems and special cases.
*/
maxoff = PageGetMaxOffsetNumber(page);
@ -180,6 +180,7 @@ statapprox_heap(Relation rel, output_type *stat)
}
stat->table_len = (uint64) nblocks *BLCKSZ;
stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
stat->tuple_count + misc_count);
@ -240,9 +241,9 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
errmsg("cannot access temporary tables of other sessions")));
/*
* We support only ordinary relations and materialised views, because we
* depend on the visibility map and free space map for our estimates about
* unscanned pages.
*/
if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_MATVIEW))


@ -86,6 +86,7 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
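
The reason a step that is relatively prime to nblocks works: adding it modulo nblocks cycles through every block exactly once before repeating, and randomizing the start keeps short scans from always landing on the same low-numbered blocks. A standalone sketch (random_relative_prime is the server's helper; the fixed numbers here are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t gcd(uint32_t a, uint32_t b)
{
    while (b) { uint32_t t = a % b; a = b; b = t; }
    return a;
}

int main(void)
{
    uint32_t nblocks = 10;
    uint32_t step = 3;      /* any step with gcd(step, nblocks) == 1 */
    uint32_t start = 7;     /* randomized in the real code */
    uint32_t b = start;

    if (gcd(step, nblocks) != 1)
        return 1;           /* would revisit some blocks and skip others */

    /* Coprime step: each block is printed exactly once before wrapping. */
    for (uint32_t i = 0; i < nblocks; i++)
    {
        printf("%u ", b);
        b = (b + step) % nblocks;
    }
    printf("\n");
    return 0;
}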


@ -43,7 +43,8 @@ typedef struct
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber estblocks; /* estimated number of returned blocks
* (moving) */
BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
@ -92,6 +93,7 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
@ -269,8 +271,8 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
/*
* Assumption here is that we'll never read less than 1% of table pages,
* this is here mainly because it is much less bad to overestimate than
* underestimate and using just spc_random_page_cost will probably lead to
* underestimations in general.
*/
*pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
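
With illustrative numbers the clamp works out like this; a standalone sketch of just the page estimate (Min and Max are reimplemented here, and every value is invented):

#include <stdio.h>

static double Min(double a, double b) { return a < b ? a : b; }
static double Max(double a, double b) { return a > b ? a : b; }

int main(void)
{
    double rel_pages = 10000.0;             /* pages in the table */
    double spc_random_page_cost = 4.0;
    double time = 100.0;                    /* sampling budget, in cost units */

    /* Never estimate below 1% of the table, never above the whole table. */
    double pages = Min(rel_pages,
                       Max(time / spc_random_page_cost, rel_pages / 100.0));

    printf("estimated pages read: %g\n", pages);    /* 100: the 1% floor wins */
    return 0;
}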


@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
thisblock = ItemPointerGetBlockNumber(&htup->t_self);
/*
* If we're in a block that belongs to a future range, summarize what
* we've got and start afresh. Note the scan might have skipped many
* pages, if they were devoid of live tuples; make sure to insert index
* tuples for those too.
*/
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{
@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
Datum
brinbuildempty(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
Buffer metabuf;


@ -298,10 +298,10 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* Placement strategies
*
* These are implemented by logically negating the result of the
* converse placement operator; for this to work, the converse
* operator must be part of the opclass. An error will be thrown
* by inclusion_get_strategy_procinfo() if the required strategy
* is not part of the opclass.
*
* These all return false if either argument is empty, so there is
* no need to check for empty elements.
@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* strategies because some elements can be contained even though
* the union is not; instead we use the overlap operator.
*
* We check for empty elements separately as they are not merged
* to the union but contained by everything.
*/
case RTContainedByStrategyNumber:
@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
/*
* Adjacent strategy
*
* We test for overlap first but to be safe we need to call the
* actual adjacent operator also.
*
* An empty element cannot be adjacent to any other, so there is
* no need to check for it.
@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* the contains operator. Generally, inequality strategies do not
* make much sense for the types which will be used with the
* inclusion BRIN family of opclasses, but it is possible to
* implement them with logical negation of the left-of and
* right-of operators.
*
* NB: These strategies cannot be used with geometric datatypes
* that use comparison of areas! The only exception is the "same"


@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
attr = bdesc->bd_tupdesc->attrs[attno - 1];
/*
* Adjust "allnulls". If A doesn't have values, just copy the values from
* B into A, and we're done. We cannot run the operators in this case,
* because values in A might contain garbage. Note we already established
* that B contains values.
*/
if (col_a->bv_allnulls)
{


@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* Set up the scan keys, and check for unsatisfiable query.
*/
ginFreeScanKeys(so); /* there should be no keys yet, but just to be
* sure */
ginNewScanKey(scan);
if (GinIsVoidRes(scan))


@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
}
/*
* If we're doing an index-only scan, on the first call, also initialize a
* tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_itupdesc)
@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
/*
* Look up the datatype returned by the original ordering
* operator. GiST always uses a float8 for the distance function,
* but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise


@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
{
/*
* For now, parallel operations are required to be strictly read-only.
* Unlike heap_update() and heap_delete(), an insert should never create a
* combo CID, so it might be possible to relax this restriction, but not
* without more thought and testing.
*/
if (IsInParallelMode())
ereport(ERROR,
@ -2768,8 +2768,8 @@ l1:
infomask = tp.t_data->t_infomask;
/*
* Sleep until concurrent transaction ends -- except when there's a
* single locker and it's our own transaction. Note we don't care
* which lock mode the locker has, because we need the strongest one.
*
* Before sleeping, we need to acquire tuple lock to establish our
@ -2822,8 +2822,8 @@ l1:
else if (!TransactionIdIsCurrentTransactionId(xwait))
{
/*
* Wait for regular transaction to end; but first, acquire tuple
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
@ -3336,8 +3336,8 @@ l2:
*
* Before sleeping, we need to acquire tuple lock to establish our
* priority for the tuple (see heap_lock_tuple). LockTuple will
* release us when we are next-in-line for the tuple. Note we must
* not acquire the tuple lock until we're sure we're going to sleep;
* otherwise we're open for race conditions with other transactions
* holding the tuple lock which sleep on us.
*
@ -3425,9 +3425,9 @@ l2:
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
/*
* If it's just a key-share locker, and we're not changing the key
* columns, we don't need to wait for it to end; but we need to
* preserve it as locker.
*/
checked_lockers = true;
locker_remains = true;
@ -3436,8 +3436,8 @@ l2:
else
{
/*
* Wait for regular transaction to end; but first, acquire tuple
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
@ -4501,8 +4501,8 @@ l3:
* for the tuple. We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while
* rechecking tuple state.
*/
if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))
@ -4613,9 +4613,9 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* xwait is done, but if xwait had just locked the tuple then some
* other xact could update this tuple before we get to this point.
* Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@ -4628,9 +4628,9 @@ l3:
* Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that would have been handled above. So
* that transaction must necessarily be gone by now. But
* don't check for this in the multixact case, because some
* locker transactions might still be running.
*/
UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
}
@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
MarkBufferDirty(buffer);
/*
* Replace the speculative insertion token with a real t_ctid, pointing to
* itself like it does on regular tuples.
*/
htup->t_ctid = tuple->t_self;
@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
{
/*
* The OID column can appear in an index definition, but that's
* OK, because we always copy the OID if present (see below).
* Other system columns may not.
*/
if (attno == ObjectIdAttributeNumber)
continue;


@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
/*
* Insert the correct position into CTID of the stored tuple, too (unless
* this is a speculative insertion, in which case the token is held in
* CTID field instead)
*/
if (!token)
{


@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
* Check permissions- if the user does not have access to view all of the
* key columns then return NULL to avoid leaking data.
*
* First check if RLS is enabled for the relation. If so, return NULL to
* avoid leaking data.
*
* Next we need to check table-level SELECT access and then, if there is
* no access there, check column-level permissions.
*/
/*
@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
if (aclresult != ACLCHECK_OK)
{
/*
* No table-level access, so step through the columns in the index and
* make sure the user has SELECT rights on all of them.
*/
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];
/*
* Note that if attnum == InvalidAttrNumber, then this is an index
* based on an expression and we return no detail rather than try
* to figure out what column(s) the expression includes and if the
* user has SELECT rights on them.
*/
if (attnum == InvalidAttrNumber ||
pg_attribute_aclcheck(indrelid, attnum, GetUserId(),


@ -171,9 +171,10 @@ top:
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);
/*
* If it's a speculative insertion, wait for it to finish (ie. to
* go ahead with the insertion, or kill the tuple). Otherwise
* wait for the transaction to finish as usual.
*/
if (speculativeToken)


@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
/*
* If the left sibling is split again by another backend,
* after we released the lock, we know that the first
@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
leafrightsib = opaque->btpo_next;
/*
* Before attempting to lock the parent page, check that the right sibling
* is not in half-dead state. A half-dead right sibling would have no
* downlink in the parent, which would be highly confusing later when we
* delete the downlink that follows the current page's downlink. (I
* believe the deletion would work correctly, but it would fail the
* cross-check we make that the following downlink points to the right
* sibling of the delete page.)
*/


@ -40,9 +40,8 @@ typedef struct
BTSpool *spool;
/*
* spool2 is needed only when the index is a unique index. Dead tuples are
* put into spool2 instead of spool in order to avoid uniqueness check.
*/
BTSpool *spool2;
double indtuples;


@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
offnum = OffsetNumberPrev(offnum);
/*
* By here the scan position is now set for the first key. If all further
* tuples are expected to match we set the SK_BT_MATCHED flag to avoid
* re-checking the scan key later. This is a big win for slow key matches
* though is still significant even for fast datatypes.
*/
switch (startKeys[0]->sk_strategy)
{


@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
Datum test;
/*
* If the scan key has already matched we can skip this key, as long
* as the index tuple does not contain NULL values.
*/
if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
continue;


@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec;
xlrec = (xl_replorigin_set *) rec;
appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;
xlrec = (xl_replorigin_drop *) rec;
appendStringInfo(buf, "drop %u", xlrec->node_id);


@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
memset(parsed, 0, sizeof(*parsed));
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
* present */
parsed->xact_time = xlrec->xact_time;
@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
memset(parsed, 0, sizeof(*parsed));
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
* present */
parsed->xact_time = xlrec->xact_time;


@ -658,6 +658,7 @@ Datum
spgcanreturn(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
/* int i = PG_GETARG_INT32(1); */
SpGistCache *cache;


@ -28,10 +28,12 @@
typedef struct
{
uint32 seed; /* random seed */
BlockNumber startblock; /* starting block, we use this for syncscan
* support */
BlockNumber nblocks; /* number of blocks */
BlockNumber blockno; /* current block */
float4 probability; /* probability that tuple will be returned
* (0.0-1.0) */
OffsetNumber lt; /* last tuple returned from current block */
SamplerRandomState randstate; /* random generator tsdesc */
} BernoulliSamplerData;
@ -82,9 +84,8 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
(BernoulliSamplerData *) tsdesc->tsmdata;
/*
* Bernoulli sampling scans all blocks on the table and supports syncscan
* so loop from startblock to startblock instead of from 0 to nblocks.
*/
if (sampler->blockno == InvalidBlockNumber)
sampler->blockno = sampler->startblock;
@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
/*
* Loop over tuple offsets until the random generator returns value that
* is within the probability of returning the tuple or until we reach end
* of the block.
*
* (This is our implementation of bernoulli trial)
*/
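
The trial the comment refers to is a per-tuple coin flip: draw a uniform fraction and return the tuple when the draw falls within the configured probability. A standalone sketch (the server draws from its own SamplerRandomState via sampler_random_fract; rand() stands in here):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    double probability = 0.3;   /* fraction of tuples to return */
    int maxoffset = 20;         /* tuples on the current block */

    srand(42);
    for (int tupoffset = 1; tupoffset <= maxoffset; tupoffset++)
    {
        /* Bernoulli trial: each tuple is returned independently. */
        double u = (double) rand() / ((double) RAND_MAX + 1.0);

        if (u <= probability)
            printf("return tuple at offset %d\n", tupoffset);
    }
    return 0;
}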


@ -78,15 +78,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
fcinfo.argnull[0] = false;
/*
* Second arg for init function is always REPEATABLE When
* tablesample->repeatable is NULL then REPEATABLE clause was not
* specified. When specified, the expression cannot evaluate to NULL.
*/
if (tablesample->repeatable)
{
ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
(PlanState *) scanstate);
fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
&fcinfo.argnull[1], NULL);
if (fcinfo.argnull[1])
@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
visible = SampleTupleVisible(tuple, tupoffset, scan);
/*
* Let the sampling method examine the actual tuple and decide if
* we should return it.
*
* Note that we let it examine even invisible tuples for
* statistical purposes, but not return them since user should
@ -251,16 +251,16 @@ tablesample_getnext(TableSampleDesc *desc)
PointerGetDatum(desc)));
/*
* Report our new scan position for synchronization purposes. We don't
* do that when moving backwards, however. That would just mess up any
* other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the final
* state of the position hint is back at the start of the rel. That's
* not strictly necessary, but otherwise when you run the same query
* multiple times the starting position would shift a little bit
* backwards on every invocation, which is confusing. We don't
* guarantee any specific ordering in general, though.
*/
if (scan->rs_syncscan)
ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
@ -321,8 +321,8 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
{
/*
* If this scan is reading whole pages at a time, there is already
* visibility info present in rs_vistuples so we can just search it for
* the tupoffset.
*/
if (scan->rs_pageatatime)
{
@ -333,8 +333,8 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
* Do the binary search over rs_vistuples, it's already sorted by
* OffsetNumber so we don't need to do any sorting ourselves here.
*
* We could use bsearch() here but it's slower for integers because of
* the function call overhead and because it needs boiler plate code
* it would not save us anything code-wise anyway.
*/
while (start <= end)
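
The open-coded search that begins at the while (start <= end) line above has this shape; a standalone sketch over a sorted offset array (the names mirror the pattern, not the exact server code):

#include <stdint.h>
#include <stdio.h>

/* Return 1 if tupoffset appears in the sorted vistuples array. */
static int offset_visible(const uint16_t *vistuples, int n, uint16_t tupoffset)
{
    int start = 0;
    int end = n - 1;

    while (start <= end)
    {
        int mid = start + (end - start) / 2;
        uint16_t cur = vistuples[mid];

        if (cur == tupoffset)
            return 1;
        else if (cur < tupoffset)
            start = mid + 1;
        else
            end = mid - 1;
    }
    return 0;
}

int main(void)
{
    uint16_t vistuples[] = {2, 5, 9, 14, 21};   /* already sorted by offset */

    printf("%d %d\n",
           offset_visible(vistuples, 5, 9),     /* 1: found */
           offset_visible(vistuples, 5, 10));   /* 0: not visible */
    return 0;
}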


@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
return;
/*
* Comply with the WAL-before-data rule: if caller specified it wants this
* value to be recorded in WAL, do so before touching the data.
*/
if (do_xlog)
WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
/*
* We split the xids to set the timestamp to in groups belonging to the
* same SLRU page; the first element in each such set is its head. The
* first group has the main XID as the head; subsequent sets use the first
* subxid not on the previous page as head. This way, we only have to
* lock/modify each SLRU page once.
*/
for (i = 0, headxid = xid;;)
{
@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
break;
/*
* Set the new head and skip over it, as well as over the subxids we
* just wrote.
*/
headxid = subxids[j];
i += j - i + 1;
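
The grouping the comment describes (lock and modify each SLRU page once, with the first XID on each page as the group head) can be sketched standalone like this; XIDS_PER_PAGE is an illustrative constant, the real divisor derives from the SLRU page size:

#include <stdint.h>
#include <stdio.h>

#define XIDS_PER_PAGE 2048

int main(void)
{
    /* main xid first, then sorted subxids, mirroring the loop's inputs */
    uint32_t xids[] = {100000, 100001, 102050, 102051, 104100};
    int nxids = sizeof(xids) / sizeof(xids[0]);
    int i = 0;

    while (i < nxids)
    {
        long long page = xids[i] / XIDS_PER_PAGE;
        int j = i;

        /* Extend the group while the xids stay on the same page. */
        while (j < nxids && xids[j] / XIDS_PER_PAGE == page)
            j++;

        printf("lock/modify page %lld once for %d xid(s)\n", page, j - i);
        i = j;  /* the next group's head is the first xid on a later page */
    }
    return 0;
}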
@ -495,8 +495,8 @@ BootStrapCommitTs(void)
{
/*
* Nothing to do here at present, unlike most other SLRU modules; segments
* are created when the server is started with this module enabled. See
* StartupCommitTs.
*/
}


@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
MultiXactIdSetOldestVisible();
/*
* If we know the multi is used only for locking and not for updates, then
* we can skip checking if the value is older than our oldest visible
* multi. It cannot possibly still be running.
*/
if (onlyLock &&
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it has already been removed, or will be removed shortly, by
* truncation. Returning the wrong values could lead to an incorrect
* visibility result. However, to support pg_upgrade we need to allow an
* empty set to be returned regardless, if the caller is willing to accept
* it; the caller is expected to check that it's an allowed condition
* (such as ensuring that the infomask bits set on the tuple are
* consistent with the pg_upgrade scenario). If the caller is expecting
* this to be called only on recently created multis, then we raise an
* error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. This raises a
@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
* enough to contain the next value that would be created.
*
* We need to do this pretty early during the first startup in binary
* upgrade mode: before StartupMultiXact() in fact, because this routine
* is called even before that by StartupXLOG(). And we can't do it
* earlier than at this point, because during that first call of this
* routine we determine the MultiXactState->nextMXact value that
* MaybeExtendOffsetSlru needs.
*/
if (IsBinaryUpgrade)
MaybeExtendOffsetSlru();
@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* Determine the offset of the oldest multixact that might still be
* referenced. Normally, we can read the offset from the multixact
* itself, but there's an important special case: if there are no
* multixacts in existence at all, oldest_datminmxid obviously can't point
* to one. It will instead point to the multixact ID that will be
* assigned the next time one is needed.
*
* NB: oldest_dataminmxid is the oldest multixact that might still be
* referenced from a table, unlike in DetermineSafeOldestOffset, where we
@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
* obviously can't point to one. It will instead point to the multixact
* ID that will be assigned the next time one is needed.
*
* NB: oldestMXact should be the oldest multixact that still exists in the
* SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
* based on the oldest value that might be referenced in a table.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
if (MultiXactState->nextMXact == oldestMXact)


@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
* Normally, the user will have requested at least one worker process, but
* if by chance they have not, we can skip a bunch of things here.
*/
if (pcxt->nworkers > 0)
{
@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
* memory segment; instead, just use backend-private memory.
*
* Also, if we can't create a dynamic shared memory segment because the
* maximum number of segments have already been created, then fall back to
* backend-private memory, and plan not to use any workers. We hope this
* won't happen very often, but it's better to abandon the use of
* parallelism than to fail outright.
*/
segsize = shm_toc_estimate(&pcxt->estimator);
@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
*
* The caller must be able to tolerate ending up with fewer workers than
* expected, so there is no need to throw an error here if registration
* fails. It wouldn't help much anyway, because registering the worker in
* no way guarantees that it will start up and initialize successfully.
*/
for (i = 0; i < pcxt->nworkers; ++i)
{
@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
else
{
/*
* If we weren't able to register the worker, then we've bumped up
* against the max_worker_processes limit, and future
* registrations will probably fail too, so arrange to skip them.
* But we still have to execute this code for the remaining slots
* to make sure that we forget about the error queues we budgeted
@ -459,9 +459,9 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
int i;
/*
* This will process any parallel messages that are pending, which may
* change the outcome of the loop that follows. It may also throw an
* error propagated from a worker.
*/
CHECK_FOR_INTERRUPTS();
@ -637,9 +637,9 @@ HandleParallelMessages(void)
for (i = 0; i < pcxt->nworkers; ++i)
{
/*
* Read as many messages as we can from each worker, but stop when
* either (1) the error queue goes away, which can happen if we
* receive a Terminate message from the worker; or (2) no more
* messages can be read from the worker without blocking.
*/
while (pcxt->worker[i].error_mqh != NULL)
@ -686,6 +686,7 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
case 'K': /* BackendKeyData */
{
int32 pid = pq_getmsgint(msg, 4);
(void) pq_getmsgint(msg, 4); /* discard cancel key */
(void) pq_getmsgend(msg);
pcxt->worker[i].pid = pid;
@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg)
ALLOCSET_DEFAULT_MAXSIZE);
/*
* Now that we have a resource owner, we can attach to the dynamic shared
* memory segment and read the table of contents.
*/
seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg)
/*
* Send a BackendKeyData message to the process that initiated parallelism
* so that it has access to our PID before it receives any other messages
* from us. Our cancel key is sent, too, since that's the way the
* protocol message is defined, but it won't actually be used for anything
* in this case.
*/
pq_beginmessage(&msgbuf, 'K');
pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg)
pq_endmessage(&msgbuf);
/*
* Hooray! Primary initialization is complete. Now, we need to set up our
* backend-local state to match the original backend.
*/
/*
* Load libraries that were loaded by original backend. We want to do
* this before restoring GUCs, because the libraries might define custom
* variables.
*/
libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg)
SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
/*
* We've initialized all of our state now; nothing should change
* hereafter.
*/
EnterParallelMode();


@ -256,24 +256,24 @@ AtAbort_Twophase(void)
return;
/*
* What to do with the locked global transaction entry? If we were in the
* process of preparing the transaction, but haven't written the WAL
* record and state file yet, the transaction must not be considered as
* prepared. Likewise, if we are in the process of finishing an
* already-prepared transaction, and fail after having already written the
* 2nd phase commit or rollback record to the WAL, the transaction should
* not be considered as prepared anymore. In those cases, just remove the
* entry from shared memory.
*
* Otherwise, the entry must be left in place so that the transaction can
* be finished later, so just unlock it.
*
* If we abort during prepare, after having written the WAL record, we
* might not have transferred all locks and other state to the prepared
* transaction yet. Likewise, if we abort during commit or rollback,
* after having written the WAL record, we might not have released all the
* resources held by the transaction yet. In those cases, the in-memory
* state can be wrong, but it's too late to back out.
*/
if (!MyLockedGxact->valid)
{
@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
/*
* Remember that we have this GlobalTransaction entry locked for us. If we
* abort after this, we must release it.
*/
MyLockedGxact = gxact;
@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it will be recycled if
* we fail after this point. It is still locked by our backend so it
* won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
/*
* We're done with recovering this transaction. Clear
* MyLockedGxact, like we do in PrepareTransaction() during normal
* operation.
*/
PostPrepare_Twophase();


@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s)
Assert(s->state == TRANS_INPROGRESS);
/*
* Workers synchronize transaction state at the beginning of each parallel
* operation, so we can't account for new XIDs at this point.
*/
if (IsInParallelMode())
elog(ERROR, "cannot assign XIDs during a parallel operation");
@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
return false;
/*
* In parallel workers, the XIDs we must consider as current are stored in
* ParallelCurrentXids rather than the transaction-state stack. Note that
* the XIDs in this array are sorted numerically rather than according to
* transactionIdPrecedes order.
*/
if (nParallelCurrentXids > 0)
{
@ -2316,8 +2316,8 @@ PrepareTransaction(void)
/*
* In normal commit-processing, this is all non-critical post-transaction
* cleanup. When the transaction is prepared, however, it's important
* that the locks and other per-backend resources are transferred to the
* prepared transaction's PGPROC entry. Note that if an error is raised
* here, it's too late to abort the transaction. XXX: This probably should
* be in a critical section, to force a PANIC if any of this fails, but
@ -2358,9 +2358,8 @@ PrepareTransaction(void)
/*
* Allow another backend to finish the transaction. After
* PostPrepare_Twophase(), the transaction is completely detached from our
* backend. The rest is just non-critical cleanup of backend-local state.
*/
PostPrepare_Twophase();
@ -2520,9 +2519,9 @@ AbortTransaction(void)
latestXid = InvalidTransactionId;
/*
* Since the parallel master won't get our value of XactLastRecEnd in
* this case, we nudge WAL-writer ourselves in this case. See related
* comments in RecordTransactionAbort for why this matters.
*/
XLogSetAsyncXactLSN(XactLastRecEnd);
}
@ -4017,12 +4016,13 @@ BeginInternalSubTransaction(char *name)
/*
* Workers synchronize transaction state at the beginning of each parallel
* operation, so we can't account for new subtransactions after that
* point. We might be able to make an exception for the type of
* subtransaction established by this function, which is typically used in
* contexts where we're going to release or roll back the subtransaction
* before proceeding further, so that no enduring change to the
* transaction state occurs. For now, however, we prohibit this case along
* with all the others.
*/
if (IsInParallelMode())
ereport(ERROR,
@ -4773,7 +4773,8 @@ Size
EstimateTransactionStateSpace(void)
{
TransactionState s;
Size nxids = 5; /* iso level, deferrable, top & current XID,
* XID count */
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address)
}
/*
* OK, we need to generate a sorted list of XIDs that our workers should
* view as current. First, figure out how many there are.
*/
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{


@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
/*
* Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
* If so, may have to go back and have the caller recompute everything.
* This can only happen just after a checkpoint, so it's better to be slow
* in this case and fast otherwise.
*
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
* affect the contents of the XLOG record, so we'll update our local copy
* but not force a recomputation. (If doPageWrites was just turned off,
* we could recompute the record without full pages, but we choose not to
* bother.)
*/
if (RedoRecPtr != Insert->RedoRecPtr)
{
@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
{
/*
* Oops, some buffer now needs to be backed up that the caller didn't
* back up. Start over.
*/
WALInsertLockRelease();
END_CRIT_SECTION();
@ -2059,8 +2059,8 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr)
* estimated end of next checkpoint.
*
* To estimate where the next checkpoint will finish, assume that the
* system runs steadily consuming CheckPointDistanceEstimate
* bytes between every checkpoint.
* system runs steadily consuming CheckPointDistanceEstimate bytes between
* every checkpoint.
*
* The reason this calculation is done from the prior checkpoint, not the
* one that just finished, is that this behaves better if some checkpoint
@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
/*
* XXX: What should we use as max_segno? We used to use XLOGfileslop when
* that was a constant, but that was always a bit dubious: normally, at a
* checkpoint, XLOGfileslop was the offset from the checkpoint record,
* but here, it was the offset from the insert location. We can't do the
* checkpoint, XLOGfileslop was the offset from the checkpoint record, but
* here, it was the offset from the insert location. We can't do the
* normal XLOGfileslop calculation here because we don't have access to
* the prior checkpoint's redo location. So somewhat arbitrarily, just
* use CheckPointSegments.
* the prior checkpoint's redo location. So somewhat arbitrarily, just use
* CheckPointSegments.
*/
max_segno = logsegno + CheckPointSegments;
if (!InstallXLogFileSegment(&installed_segno, tmppath,
@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
nread = upto - nbytes;
/*
* The part that is not read from the source file is filled with zeros.
* The part that is not read from the source file is filled with
* zeros.
*/
if (nread < sizeof(buffer))
memset(buffer, 0, sizeof(buffer));
@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
/*
* Now move the segment into place with its final name. (Or just return
* the path to the file we created, if the caller wants to handle the
* rest on its own.)
* the path to the file we created, if the caller wants to handle the rest
* on its own.)
*/
if (dstfname)
{
@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
/*
* Remove files that are on a timeline older than the new one we're
* switching to, but with a segment number >= the first segment on
* the new timeline.
* switching to, but with a segment number >= the first segment on the
* new timeline.
*/
if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
strcmp(xlde->d_name + 8, switchseg + 8) > 0)
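
The two string comparisons above work because WAL segment file names are 24 hex digits: the first 8 encode the timeline ID, the remaining 16 the segment number. A sketch of the same test in isolation, assuming well-formed names:

#include <stdbool.h>
#include <string.h>

/*
 * Sketch: after a timeline switch, a file is obsolete if it belongs to an
 * older timeline (first 8 hex digits) but carries a segment number past
 * the switch segment (remaining 16 hex digits).
 */
static bool
is_nonparent_segment(const char *name, const char *switchseg)
{
    return strncmp(name, switchseg, 8) < 0 &&   /* older timeline... */
           strcmp(name + 8, switchseg + 8) > 0; /* ...later segment */
}
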
@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
segname)));
#ifdef WIN32
/*
* On Windows, if another process (e.g another backend) holds the file
* open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
* will still show up in directory listing until the last handle is
* closed. To avoid confusing the lingering deleted file for a live WAL
* file that needs to be archived, rename it before deleting it.
* closed. To avoid confusing the lingering deleted file for a live
* WAL file that needs to be archived, rename it before deleting it.
*
* If another process holds the file open without FILE_SHARE_DELETE
* flag, rename will fail. We'll try again at the next checkpoint.
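
A portable sketch of the rename-before-unlink idiom the comment describes; the ".deleted" suffix and the buffer size are illustrative, and a failed rename simply means we retry at the next checkpoint:

#include <stdio.h>

/*
 * Sketch: rename the file under a throwaway name before unlinking it, so a
 * lingering directory entry is never mistaken for a live WAL segment.
 * Returns 0 on success, -1 if the rename failed (we just retry later).
 */
static int
remove_wal_file_politely(const char *path)
{
    char tmppath[1024];

    snprintf(tmppath, sizeof(tmppath), "%s.deleted", path);
    if (rename(path, tmppath) != 0)
        return -1;              /* still open without FILE_SHARE_DELETE */
    return remove(tmppath);
}
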
@ -4609,11 +4611,11 @@ XLOGShmemInit(void)
int i;
#ifdef WAL_DEBUG
/*
* Create a memory context for WAL debugging that's exempt from the
* normal "no pallocs in critical section" rule. Yes, that can lead to a
* PANIC if an allocation fails, but wal_debug is not for production use
* anyway.
* Create a memory context for WAL debugging that's exempt from the normal
* "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
* an allocation fails, but wal_debug is not for production use anyway.
*/
if (walDebugCxt == NULL)
{
@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void)
}
/*
* Override any inconsistent requests. Note that this is a change
* of behaviour in 9.5; prior to this we simply ignored a request
* to pause if hot_standby = off, which was surprising behaviour.
* Override any inconsistent requests. Note that this is a change of
* behaviour in 9.5; prior to this we simply ignored a request to pause if
* hot_standby = off, which was surprising behaviour.
*/
if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
recoveryTargetActionSet &&
@ -6222,9 +6224,9 @@ StartupXLOG(void)
* in place if the database had been cleanly shut down, but it seems
* safest to just remove them always and let them be rebuilt during the
* first backend startup. These files need to be removed from all
* directories including pg_tblspc, however the symlinks are created
* only after reading tablespace_map file in case of archive recovery
* from backup, so we need to clear old relcache files here after creating
* directories including pg_tblspc, however the symlinks are created only
* after reading tablespace_map file in case of archive recovery from
* backup, so we need to clear old relcache files here after creating
* symlinks.
*/
RelationCacheInitFileRemove();
@ -6442,9 +6444,9 @@ StartupXLOG(void)
* Also set backupEndPoint and use minRecoveryPoint as the backup end
* location if we're starting recovery from a base backup which was
* taken from a standby. In this case, the database system status in
* pg_control must indicate that the database was already in
* recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can
* be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
* pg_control must indicate that the database was already in recovery.
* Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
* DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
* before reaching this point; e.g. because restore_command or
* primary_conninfo were faulty.
*
@ -6500,10 +6502,10 @@ StartupXLOG(void)
/*
* If there was a tablespace_map file, it's done its job and the
* symlinks have been created. We must get rid of the map file
* so that if we crash during recovery, we don't create symlinks
* again. It seems prudent though to just rename the file out of
* the way rather than delete it completely.
* symlinks have been created. We must get rid of the map file so
* that if we crash during recovery, we don't create symlinks again.
* It seems prudent though to just rename the file out of the way
* rather than delete it completely.
*/
if (haveTblspcMap)
{
@ -6859,7 +6861,8 @@ StartupXLOG(void)
{
/*
* Before we continue on the new timeline, clean up any
* (possibly bogus) future WAL segments on the old timeline.
* (possibly bogus) future WAL segments on the old
* timeline.
*/
RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
@ -6895,12 +6898,13 @@ StartupXLOG(void)
/*
* This is the last point where we can restart recovery with a
* new recovery target, if we shutdown and begin again. After
* this, Resource Managers may choose to do permanent corrective
* actions at end of recovery.
* this, Resource Managers may choose to do permanent
* corrective actions at end of recovery.
*/
switch (recoveryTargetAction)
{
case RECOVERY_TARGET_ACTION_SHUTDOWN:
/*
* exit with special return code to request shutdown
* of postmaster. Log messages issued from
@ -7259,8 +7263,8 @@ StartupXLOG(void)
* too.
*
* If a .done or .ready file already exists for the old timeline,
* however, we had already determined that the segment is complete,
* so we can let it be archived normally. (In particular, if it was
* however, we had already determined that the segment is complete, so
* we can let it be archived normally. (In particular, if it was
* restored from the archive to begin with, it's expected to have a
* .done file).
*/
@ -7366,8 +7370,8 @@ StartupXLOG(void)
XLogReportParameters();
/*
* Local WAL inserts enabled, so it's time to finish initialization
* of commit timestamp.
* Local WAL inserts enabled, so it's time to finish initialization of
* commit timestamp.
*/
CompleteCommitTsInitialization();
@ -8056,8 +8060,8 @@ static void
UpdateCheckPointDistanceEstimate(uint64 nbytes)
{
/*
* To estimate the number of segments consumed between checkpoints, keep
* a moving average of the amount of WAL generated in previous checkpoint
* To estimate the number of segments consumed between checkpoints, keep a
* moving average of the amount of WAL generated in previous checkpoint
* cycles. However, if the load is bursty, with quiet periods and busy
* periods, we want to cater for the peak load. So instead of a plain
* moving average, let the average decline slowly if the previous cycle
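
In other words, the estimate tracks a one-sided moving average: it jumps straight up to any larger observation but decays only gradually toward smaller ones, so bursty workloads stay sized for their peaks. A hedged sketch of that update rule; the 0.90/0.10 weights are an assumption chosen to match the described behaviour, not necessarily the exact constants in xlog.c:

#include <stdint.h>

static double CheckPointDistanceEstimate = 0.0;

/*
 * Sketch: rise immediately to a new peak, decay slowly otherwise, so the
 * estimate caters for bursty load.
 */
static void
update_distance_estimate(uint64_t nbytes)
{
    if (CheckPointDistanceEstimate < (double) nbytes)
        CheckPointDistanceEstimate = (double) nbytes;
    else
        CheckPointDistanceEstimate =
            0.90 * CheckPointDistanceEstimate + 0.10 * (double) nbytes;
}
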
@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record)
}
/*
* Update the commit timestamp tracking. If there was a change
* it needs to be activated or deactivated accordingly.
* Update the commit timestamp tracking. If there was a change it
* needs to be activated or deactivated accordingly.
*/
if (track_commit_timestamp != xlrec.track_commit_timestamp)
{
@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record)
if (track_commit_timestamp)
ActivateCommitTs();
else
/*
* We can't create a new WAL record here, but that's OK as
* master did the WAL logging already and we will replay the
@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
linkpath[rllen] = '\0';
/*
* Add the escape character '\\' before newline in a string
* to ensure that we can distinguish between the newline in
* the tablespace path and end of line while reading
* tablespace_map file during archive recovery.
* Add the escape character '\\' before newline in a string to
* ensure that we can distinguish between the newline in the
* tablespace path and end of line while reading tablespace_map
* file during archive recovery.
*/
initStringInfo(&buflinkpath);
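
The escaping rule is just "write a backslash before every newline in the path" so that the map file's real line endings stay unambiguous. A small self-contained sketch of that transform (the function and buffer names are hypothetical, not the StringInfo-based code above):

#include <stddef.h>

/*
 * Sketch: copy "path" into "out", inserting '\\' before each newline so a
 * reader can tell an embedded newline from an end-of-line.  Returns the
 * number of bytes written, or -1 if the buffer is too small.
 */
static int
escape_newlines(const char *path, char *out, size_t outlen)
{
    size_t n = 0;

    for (; *path; path++)
    {
        if (*path == '\n')
        {
            if (n + 2 > outlen)
                return -1;
            out[n++] = '\\';
        }
        else if (n + 1 > outlen)
            return -1;
        out[n++] = *path;
    }
    if (n >= outlen)
        return -1;
    out[n] = '\0';
    return (int) n;
}
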
@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
BACKUP_LABEL_FILE)));
/*
* Remove tablespace_map file if present, it is created
* only if there are tablespaces.
* Remove tablespace_map file if present, it is created only if there
* are tablespaces.
*/
unlink(TABLESPACE_MAP);
}
@ -10775,8 +10780,10 @@ read_tablespace_map(List **tablespaces)
char tbsoid[MAXPGPATH];
char *tbslinkpath;
char str[MAXPGPATH];
int ch, prev_ch = -1,
i = 0, n;
int ch,
prev_ch = -1,
i = 0,
n;
/*
* See if tablespace_map file is present
@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces)
/*
* Read and parse the link name and path lines from tablespace_map file
* (this code is pretty crude, but we are not expecting any variability
* in the file format). While taking backup we embed escape character
* '\\' before newline in tablespace path, so that during reading of
* (this code is pretty crude, but we are not expecting any variability in
* the file format). While taking backup we embed escape character '\\'
* before newline in tablespace path, so that during reading of
* tablespace_map file, we could distinguish newline in tablespace path
* and end of line. Now while reading tablespace_map file, remove the
* escape character that has been added in tablespace path during backup.
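
Reading the file back inverts the rule: a backslash immediately followed by a newline is an embedded newline inside the tablespace path, while a bare newline ends the entry. A sketch of that unescaping over an in-memory buffer (the real code reads the file character by character and also splits the OID from the path, both omitted here):

#include <stddef.h>

/*
 * Sketch: unescape one tablespace_map entry in place.  A "\\" + '\n' pair
 * collapses to a literal newline inside the path; an unescaped '\n'
 * terminates the entry.  Returns the number of input bytes consumed.
 */
static size_t
unescape_entry(char *line)
{
    size_t in = 0, out = 0;

    while (line[in] != '\0')
    {
        if (line[in] == '\\' && line[in + 1] == '\n')
        {
            line[out++] = '\n';
            in += 2;
        }
        else if (line[in] == '\n')
        {
            in++;               /* end of this entry */
            break;
        }
        else
            line[out++] = line[in++];
    }
    line[out] = '\0';
    return in;
}
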
@ -11272,7 +11279,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
if (!TimestampDifferenceExceeds(last_fail_time, now,
wal_retrieve_retry_interval))
{
long secs, wait_time;
long secs,
wait_time;
int usecs;
TimestampDifference(last_fail_time, now, &secs, &usecs);
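
The wait computation that follows is just "configured interval minus elapsed time, clamped at zero", converted to milliseconds. A minimal sketch, assuming secs/usecs come from a TimestampDifference-style call and the retry interval is in milliseconds, as the GUC is:

/*
 * Sketch: how long to sleep before retrying, given the time elapsed since
 * the last failure.
 */
static long
remaining_wait_ms(long secs, int usecs, int retry_interval_ms)
{
    long elapsed_ms = secs * 1000L + usecs / 1000;
    long wait_ms = retry_interval_ms - elapsed_ms;

    return (wait_ms > 0) ? wait_ms : 0;
}
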
@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir)
return;
/*
* If possible, hint to the kernel that we're soon going to fsync
* the data directory and its contents.
* If possible, hint to the kernel that we're soon going to fsync the data
* directory and its contents.
*/
#if defined(HAVE_SYNC_FILE_RANGE) || \
(defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
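
A hedged sketch of the per-file hint behind that #if ladder: on Linux, sync_file_range() with SYNC_FILE_RANGE_WRITE kicks off writeback early, and posix_fadvise(POSIX_FADV_DONTNEED) is the fallback; elsewhere this is a no-op. The HAVE_/USE_ macros are assumed to come from the build configuration:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/*
 * Sketch: hint that "fd" will soon be fsync'd, so the kernel can start
 * writeback now rather than all at once later.
 */
static void
pre_fsync_hint(int fd)
{
#if defined(HAVE_SYNC_FILE_RANGE)
    (void) sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
#elif defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
    (void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
#else
    (void) fd;                  /* no hint available on this platform */
#endif
}
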


@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
&compressed_len);
}
/* Fill in the remaining fields in the XLogRecordBlockHeader struct */
/*
* Fill in the remaining fields in the XLogRecordBlockHeader
* struct
*/
bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
/*
@ -790,10 +793,9 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
source = page;
/*
* We recheck the actual size even if pglz_compress() reports success
* and see if the number of bytes saved by compression is larger than
* the length of extra data needed for the compressed version of block
* image.
* We recheck the actual size even if pglz_compress() reports success and
* see if the number of bytes saved by compression is larger than the
* length of extra data needed for the compressed version of block image.
*/
len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
if (len >= 0 &&
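
The acceptance test being reflowed here is worth spelling out: the compressed image is kept only when it saves more bytes than the extra metadata the compressed format costs. A standalone sketch of that accept/reject shape; the compressor callback and the two-byte overhead constant are stand-ins (pglz_compress similarly returns a negative value when compression fails):

#include <stdbool.h>

/* Hypothetical: bytes of extra header a compressed block image needs. */
#define COMPRESSED_EXTRA_BYTES 2

/*
 * Sketch: run the compressor, then accept its output only if the bytes
 * saved exceed the extra metadata the compressed format requires.
 * compress() is assumed to return the compressed length, or -1 on failure.
 */
static bool
try_compress_block(int (*compress) (const void *, int, void *),
                   const void *src, int srclen, void *dst, int *dstlen)
{
    int len = compress(src, srclen, dst);

    if (len >= 0 && len + COMPRESSED_EXTRA_BYTES < srclen)
    {
        *dstlen = len;
        return true;
    }
    return false;               /* store the block uncompressed */
}
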


@ -1093,9 +1093,10 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
/*
* cross-check that hole_offset == 0 and hole_length == 0
* if the HAS_HOLE flag is not set.
* cross-check that hole_offset == 0 and hole_length == 0 if
* the HAS_HOLE flag is not set.
*/
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
(blk->hole_offset != 0 || blk->hole_length != 0))
@ -1107,9 +1108,10 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
/*
* cross-check that bimg_len < BLCKSZ
* if the IS_COMPRESSED flag is set.
* cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED
* flag is set.
*/
if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
blk->bimg_len == BLCKSZ)
@ -1120,9 +1122,10 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
/*
* cross-check that bimg_len = BLCKSZ if neither
* HAS_HOLE nor IS_COMPRESSED flag is set.
* cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor
* IS_COMPRESSED flag is set.
*/
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
!(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
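
Taken together, the three cross-checks assert the invariants of a block-image header: hole fields must be zero without HAS_HOLE, a compressed image must be shorter than BLCKSZ, and a plain image must be exactly BLCKSZ. A compact sketch of the same validation (the flag values and the 8 kB block size are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define BLCKSZ 8192
#define BKPIMAGE_HAS_HOLE      0x01    /* illustrative flag values */
#define BKPIMAGE_IS_COMPRESSED 0x02

/*
 * Sketch: validate a decoded block-image header.  Returns true when the
 * hole/compression flags are consistent with the stored lengths.
 */
static bool
block_image_is_valid(uint8_t bimg_info, uint16_t hole_offset,
                     uint16_t hole_length, uint16_t bimg_len)
{
    bool has_hole = (bimg_info & BKPIMAGE_HAS_HOLE) != 0;
    bool compressed = (bimg_info & BKPIMAGE_IS_COMPRESSED) != 0;

    if (!has_hole && (hole_offset != 0 || hole_length != 0))
        return false;           /* hole fields set without HAS_HOLE */
    if (compressed && bimg_len == BLCKSZ)
        return false;           /* compressed image must shrink */
    if (!has_hole && !compressed && bimg_len != BLCKSZ)
        return false;           /* plain image must be a full block */
    return true;
}
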


@ -401,6 +401,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
proc_exit(1); /* should never return */
case BootstrapProcess:
/*
* There was a brief instant during which mode was Normal; this is
* okay. We need to be in bootstrap mode during BootStrapXLOG for


@ -189,7 +189,8 @@ sub Catalogs
}
else
{
die "unknown column option $attopt on column $attname"
die
"unknown column option $attopt on column $attname";
}
}
push @{ $catalog{columns} }, \%row;


@ -397,8 +397,8 @@ ExecuteGrantStmt(GrantStmt *stmt)
istmt.behavior = stmt->behavior;
/*
* Convert the RoleSpec list into an Oid list. Note that at this point
* we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
* Convert the RoleSpec list into an Oid list. Note that at this point we
* insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
* there shouldn't be any additional work needed to support this case.
*/
foreach(cell, stmt->grantees)
@ -892,8 +892,8 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
iacls.behavior = action->behavior;
/*
* Convert the RoleSpec list into an Oid list. Note that at this point
* we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
* Convert the RoleSpec list into an Oid list. Note that at this point we
* insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
* there shouldn't be any additional work needed to support this case.
*/
foreach(cell, action->grantees)


@ -384,6 +384,7 @@ sub emit_pgattr_row
}
elsif ($priornotnull)
{
# attnotnull will automatically be set if the type is
# fixed-width and prior columns are all NOT NULL ---
# compare DefineAttr in bootstrap.c. oidvector and


@ -1709,8 +1709,8 @@ BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols);
/*
* We have to look up the operator's strategy number. This
* provides a cross-check that the operator does match the index.
* We have to look up the operator's strategy number. This provides a
* cross-check that the operator does match the index.
*/
/* We need the func OIDs and strategy numbers too */
for (i = 0; i < ncols; i++)


@ -453,89 +453,188 @@ static const struct object_type_map
const char *tm_name;
ObjectType tm_type;
}
ObjectTypeMap[] =
{
/* OCLASS_CLASS, all kinds of relations */
{ "table", OBJECT_TABLE },
{ "index", OBJECT_INDEX },
{ "sequence", OBJECT_SEQUENCE },
{ "toast table", -1 }, /* unmapped */
{ "view", OBJECT_VIEW },
{ "materialized view", OBJECT_MATVIEW },
{ "composite type", -1 }, /* unmapped */
{ "foreign table", OBJECT_FOREIGN_TABLE },
{ "table column", OBJECT_COLUMN },
{ "index column", -1 }, /* unmapped */
{ "sequence column", -1 }, /* unmapped */
{ "toast table column", -1 }, /* unmapped */
{ "view column", -1 }, /* unmapped */
{ "materialized view column", -1 }, /* unmapped */
{ "composite type column", -1 }, /* unmapped */
{ "foreign table column", OBJECT_COLUMN },
{
"table", OBJECT_TABLE
},
{
"index", OBJECT_INDEX
},
{
"sequence", OBJECT_SEQUENCE
},
{
"toast table", -1
}, /* unmapped */
{
"view", OBJECT_VIEW
},
{
"materialized view", OBJECT_MATVIEW
},
{
"composite type", -1
}, /* unmapped */
{
"foreign table", OBJECT_FOREIGN_TABLE
},
{
"table column", OBJECT_COLUMN
},
{
"index column", -1
}, /* unmapped */
{
"sequence column", -1
}, /* unmapped */
{
"toast table column", -1
}, /* unmapped */
{
"view column", -1
}, /* unmapped */
{
"materialized view column", -1
}, /* unmapped */
{
"composite type column", -1
}, /* unmapped */
{
"foreign table column", OBJECT_COLUMN
},
/* OCLASS_PROC */
{ "aggregate", OBJECT_AGGREGATE },
{ "function", OBJECT_FUNCTION },
{
"aggregate", OBJECT_AGGREGATE
},
{
"function", OBJECT_FUNCTION
},
/* OCLASS_TYPE */
{ "type", OBJECT_TYPE },
{
"type", OBJECT_TYPE
},
/* OCLASS_CAST */
{ "cast", OBJECT_CAST },
{
"cast", OBJECT_CAST
},
/* OCLASS_COLLATION */
{ "collation", OBJECT_COLLATION },
{
"collation", OBJECT_COLLATION
},
/* OCLASS_CONSTRAINT */
{ "table constraint", OBJECT_TABCONSTRAINT },
{ "domain constraint", OBJECT_DOMCONSTRAINT },
{
"table constraint", OBJECT_TABCONSTRAINT
},
{
"domain constraint", OBJECT_DOMCONSTRAINT
},
/* OCLASS_CONVERSION */
{ "conversion", OBJECT_CONVERSION },
{
"conversion", OBJECT_CONVERSION
},
/* OCLASS_DEFAULT */
{ "default value", OBJECT_DEFAULT },
{
"default value", OBJECT_DEFAULT
},
/* OCLASS_LANGUAGE */
{ "language", OBJECT_LANGUAGE },
{
"language", OBJECT_LANGUAGE
},
/* OCLASS_LARGEOBJECT */
{ "large object", OBJECT_LARGEOBJECT },
{
"large object", OBJECT_LARGEOBJECT
},
/* OCLASS_OPERATOR */
{ "operator", OBJECT_OPERATOR },
{
"operator", OBJECT_OPERATOR
},
/* OCLASS_OPCLASS */
{ "operator class", OBJECT_OPCLASS },
{
"operator class", OBJECT_OPCLASS
},
/* OCLASS_OPFAMILY */
{ "operator family", OBJECT_OPFAMILY },
{
"operator family", OBJECT_OPFAMILY
},
/* OCLASS_AMOP */
{ "operator of access method", OBJECT_AMOP },
{
"operator of access method", OBJECT_AMOP
},
/* OCLASS_AMPROC */
{ "function of access method", OBJECT_AMPROC },
{
"function of access method", OBJECT_AMPROC
},
/* OCLASS_REWRITE */
{ "rule", OBJECT_RULE },
{
"rule", OBJECT_RULE
},
/* OCLASS_TRIGGER */
{ "trigger", OBJECT_TRIGGER },
{
"trigger", OBJECT_TRIGGER
},
/* OCLASS_SCHEMA */
{ "schema", OBJECT_SCHEMA },
{
"schema", OBJECT_SCHEMA
},
/* OCLASS_TSPARSER */
{ "text search parser", OBJECT_TSPARSER },
{
"text search parser", OBJECT_TSPARSER
},
/* OCLASS_TSDICT */
{ "text search dictionary", OBJECT_TSDICTIONARY },
{
"text search dictionary", OBJECT_TSDICTIONARY
},
/* OCLASS_TSTEMPLATE */
{ "text search template", OBJECT_TSTEMPLATE },
{
"text search template", OBJECT_TSTEMPLATE
},
/* OCLASS_TSCONFIG */
{ "text search configuration", OBJECT_TSCONFIGURATION },
{
"text search configuration", OBJECT_TSCONFIGURATION
},
/* OCLASS_ROLE */
{ "role", OBJECT_ROLE },
{
"role", OBJECT_ROLE
},
/* OCLASS_DATABASE */
{ "database", OBJECT_DATABASE },
{
"database", OBJECT_DATABASE
},
/* OCLASS_TBLSPACE */
{ "tablespace", OBJECT_TABLESPACE },
{
"tablespace", OBJECT_TABLESPACE
},
/* OCLASS_FDW */
{ "foreign-data wrapper", OBJECT_FDW },
{
"foreign-data wrapper", OBJECT_FDW
},
/* OCLASS_FOREIGN_SERVER */
{ "server", OBJECT_FOREIGN_SERVER },
{
"server", OBJECT_FOREIGN_SERVER
},
/* OCLASS_USER_MAPPING */
{ "user mapping", OBJECT_USER_MAPPING },
{
"user mapping", OBJECT_USER_MAPPING
},
/* OCLASS_DEFACL */
{ "default acl", OBJECT_DEFACL },
{
"default acl", OBJECT_DEFACL
},
/* OCLASS_EXTENSION */
{ "extension", OBJECT_EXTENSION },
{
"extension", OBJECT_EXTENSION
},
/* OCLASS_EVENT_TRIGGER */
{ "event trigger", OBJECT_EVENT_TRIGGER },
{
"event trigger", OBJECT_EVENT_TRIGGER
},
/* OCLASS_POLICY */
{ "policy", OBJECT_POLICY }
{
"policy", OBJECT_POLICY
}
};
const ObjectAddress InvalidObjectAddress =
@ -1751,9 +1850,9 @@ pg_get_object_address(PG_FUNCTION_ARGS)
type = (ObjectType) itype;
/*
* Convert the text array to the representation appropriate for the
* given object type. Most use a simple string Values list, but there
* are some exceptions.
* Convert the text array to the representation appropriate for the given
* object type. Most use a simple string Values list, but there are some
* exceptions.
*/
if (type == OBJECT_TYPE || type == OBJECT_DOMAIN || type == OBJECT_CAST ||
type == OBJECT_DOMCONSTRAINT)


@ -175,9 +175,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
/*
* Check to see whether the table needs a TOAST table.
*
* If an update-in-place TOAST relfilenode is specified, force TOAST file
* creation even if it seems not to need one. This handles the case
* where the old cluster needed a TOAST table but the new cluster
* If an update-in-place TOAST relfilenode is specified, force TOAST
* file creation even if it seems not to need one. This handles the
* case where the old cluster needed a TOAST table but the new cluster
* would not normally create one.
*/
@ -260,9 +260,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
namespaceid = PG_TOAST_NAMESPACE;
/*
* Use binary-upgrade override for pg_type.oid, if supplied. We might
* be in the post-schema-restore phase where we are doing ALTER TABLE
* to create TOAST tables that didn't exist in the old cluster.
* Use binary-upgrade override for pg_type.oid, if supplied. We might be
* in the post-schema-restore phase where we are doing ALTER TABLE to
* create TOAST tables that didn't exist in the old cluster.
*/
if (IsBinaryUpgrade && OidIsValid(binary_upgrade_next_toast_pg_type_oid))
{


@ -2150,6 +2150,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/* We always use the default collation for statistics */
ssup.ssup_collation = DEFAULT_COLLATION_OID;
ssup.ssup_nulls_first = false;
/*
* For now, don't perform abbreviated key conversion, because full values
* are required for MCV slot generation. Supporting that optimization


@ -861,8 +861,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
* RLS (returns RLS_ENABLED) or not for this COPY statement.
*
* If the relation has a row security policy and we are to apply it
* then perform a "query" copy and allow the normal query processing to
* handle the policies.
* then perform a "query" copy and allow the normal query processing
* to handle the policies.
*
* If RLS is not enabled for this, then just fall through to the
* normal non-filtering relation handling.
@ -1408,19 +1408,20 @@ BeginCopy(bool is_from,
/*
* If we were passed in a relid, make sure we got the same one back
* after planning out the query. It's possible that it changed between
* when we checked the policies on the table and decided to use a query
* and now.
* after planning out the query. It's possible that it changed
* between when we checked the policies on the table and decided to
* use a query and now.
*/
if (queryRelId != InvalidOid)
{
Oid relid = linitial_oid(plan->relationOids);
/*
* There should only be one relationOid in this case, since we will
* only get here when we have changed the command for the user from
* a "COPY relation TO" to "COPY (SELECT * FROM relation) TO", to
* allow row level security policies to be applied.
* There should only be one relationOid in this case, since we
* will only get here when we have changed the command for the
* user from a "COPY relation TO" to "COPY (SELECT * FROM
* relation) TO", to allow row level security policies to be
* applied.
*/
Assert(list_length(plan->relationOids) == 1);


@ -554,8 +554,8 @@ createdb(const CreatedbStmt *stmt)
* Force a checkpoint before starting the copy. This will force all dirty
* buffers, including those of unlogged tables, out to disk, to ensure
* source database is up-to-date on disk for the copy.
* FlushDatabaseBuffers() would suffice for that, but we also want
* to process any pending unlink requests. Otherwise, if a checkpoint
* FlushDatabaseBuffers() would suffice for that, but we also want to
* process any pending unlink requests. Otherwise, if a checkpoint
* happened while we're copying files, a file might be deleted just when
* we're about to copy it, causing the lstat() call in copydir() to fail
* with ENOENT.


@ -57,13 +57,15 @@ typedef struct EventTriggerQueryState
bool in_sql_drop;
/* table_rewrite */
Oid table_rewrite_oid; /* InvalidOid, or set for table_rewrite event */
Oid table_rewrite_oid; /* InvalidOid, or set for
* table_rewrite event */
int table_rewrite_reason; /* AT_REWRITE reason */
/* Support for command collection */
bool commandCollectionInhibited;
CollectedCommand *currentCommand;
List *commandList; /* list of CollectedCommand; see deparse_utility.h */
List *commandList; /* list of CollectedCommand; see
* deparse_utility.h */
struct EventTriggerQueryState *previous;
} EventTriggerQueryState;
@ -2034,10 +2036,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
* object, the returned OID is Invalid. Don't return anything.
*
* One might think that a viable alternative would be to look up the
* Oid of the existing object and run the deparse with that. But since
* the parse tree might be different from the one that created the
* object in the first place, we might not end up in a consistent state
* anyway.
* Oid of the existing object and run the deparse with that. But
* since the parse tree might be different from the one that created
* the object in the first place, we might not end up in a consistent
* state anyway.
*/
if (cmd->type == SCT_Simple &&
!OidIsValid(cmd->d.simple.address.objectId))
@ -2074,10 +2076,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
identity = getObjectIdentity(&addr);
/*
* Obtain schema name, if any ("pg_temp" if a temp object).
* If the object class is not in the supported list here,
* we assume it's a schema-less object type, and thus
* "schema" remains set to NULL.
* Obtain schema name, if any ("pg_temp" if a temp
* object). If the object class is not in the supported
* list here, we assume it's a schema-less object type,
* and thus "schema" remains set to NULL.
*/
if (is_objectclass_supported(addr.classId))
{


@ -984,6 +984,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
* quite messy.
*/
RangeTblEntry *rte;
rte = rt_fetch(((SampleScan *) plan)->scanrelid, es->rtable);
custom_name = get_tablesample_method_name(rte->tablesample->tsmid);
pname = psprintf("Sample Scan (%s)", custom_name);


@ -921,9 +921,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple);
/*
* Only superuser is allowed to create leakproof functions because leakproof
* functions can see tuples which have not yet been filtered out by security
* barrier views or row level security policies.
* Only superuser is allowed to create leakproof functions because
* leakproof functions can see tuples which have not yet been filtered out
* by security barrier views or row level security policies.
*/
if (isLeakProof && !superuser())
ereport(ERROR,
@ -940,6 +940,7 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
{
Oid typeid = typenameTypeId(NULL, lfirst(lc));
Oid elt = get_base_element_type(typeid);
typeid = elt ? elt : typeid;
get_transform_oid(typeid, languageOid, false);


@ -199,8 +199,8 @@ RelationBuildRowSecurity(Relation relation)
/*
* Create a memory context to hold everything associated with this
* relation's row security policy. This makes it easy to clean up
* during a relcache flush.
* relation's row security policy. This makes it easy to clean up during
* a relcache flush.
*/
rscxt = AllocSetContextCreate(CacheMemoryContext,
"row security descriptor",
@ -209,8 +209,8 @@ RelationBuildRowSecurity(Relation relation)
ALLOCSET_SMALL_MAXSIZE);
/*
* Since rscxt lives under CacheMemoryContext, it is long-lived. Use
* a PG_TRY block to ensure it'll get freed if we fail partway through.
* Since rscxt lives under CacheMemoryContext, it is long-lived. Use a
* PG_TRY block to ensure it'll get freed if we fail partway through.
*/
PG_TRY();
{
@ -330,10 +330,10 @@ RelationBuildRowSecurity(Relation relation)
/*
* Check if no policies were added
*
* If no policies exist in pg_policy for this relation, then we
* need to create a single default-deny policy. We use InvalidOid for
* the Oid to indicate that this is the default-deny policy (we may
* decide to ignore the default policy if an extension adds policies).
* If no policies exist in pg_policy for this relation, then we need
* to create a single default-deny policy. We use InvalidOid for the
* Oid to indicate that this is the default-deny policy (we may decide
* to ignore the default policy if an extension adds policies).
*/
if (rsdesc->policies == NIL)
{
@ -435,8 +435,8 @@ RemovePolicyById(Oid policy_id)
/*
* Note that, unlike some of the other flags in pg_class, relrowsecurity
* is not just an indication of if policies exist. When relrowsecurity
* is set by a user, then all access to the relation must be through a
* is not just an indication of if policies exist. When relrowsecurity is
* set by a user, then all access to the relation must be through a
* policy. If no policy is defined for the relation then a default-deny
* policy is created and all records are filtered (except for queries from
* the owner).
@ -756,8 +756,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
errmsg("only USING expression allowed for SELECT, DELETE")));
/*
* If the command is INSERT then WITH CHECK should be the only
* expression provided.
* If the command is INSERT then WITH CHECK should be the only expression
* provided.
*/
if ((polcmd == ACL_INSERT_CHR)
&& stmt->qual != NULL)
@ -923,9 +923,9 @@ rename_policy(RenameStmt *stmt)
ObjectAddressSet(address, PolicyRelationId, opoloid);
/*
* Invalidate relation's relcache entry so that other backends (and
* this one too!) are sent SI message to make them rebuild relcache
* entries. (Ideally this should happen automatically...)
* Invalidate relation's relcache entry so that other backends (and this
* one too!) are sent SI message to make them rebuild relcache entries.
* (Ideally this should happen automatically...)
*/
CacheInvalidateRelcache(target_table);


@ -566,8 +566,8 @@ nextval_internal(Oid relid)
PreventCommandIfReadOnly("nextval()");
/*
* Forbid this during parallel operation because, to make it work,
* the cooperating backends would need to share the backend-local cached
* Forbid this during parallel operation because, to make it work, the
* cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("nextval()");
@ -702,10 +702,10 @@ nextval_internal(Oid relid)
/*
* If something needs to be WAL logged, acquire an xid, so this
* transaction's commit will trigger a WAL flush and wait for
* syncrep. It's sufficient to ensure the toplevel transaction has an xid,
* no need to assign xids to subxacts, that'll already trigger an appropriate
* wait. (Have to do that here, so we're outside the critical section)
* transaction's commit will trigger a WAL flush and wait for syncrep.
* It's sufficient to ensure the toplevel transaction has an xid, no need
* to assign xids to subxacts, that'll already trigger an appropriate wait.
* (Have to do that here, so we're outside the critical section)
*/
if (logit && RelationNeedsWAL(seqrel))
GetTopTransactionId();
@ -870,8 +870,8 @@ do_setval(Oid relid, int64 next, bool iscalled)
PreventCommandIfReadOnly("setval()");
/*
* Forbid this during parallel operation because, to make it work,
* the cooperating backends would need to share the backend-local cached
* Forbid this during parallel operation because, to make it work, the
* cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("setval()");


@ -7866,11 +7866,12 @@ ATPrepAlterColumnType(List **wqueue,
{
/*
* Set up an expression to transform the old data value to the new
* type. If a USING option was given, use the expression as transformed
* by transformAlterTableStmt, else just take the old value and try to
* coerce it. We do this first so that type incompatibility can be
* detected before we waste effort, and because we need the expression
* to be parsed against the original table row type.
* type. If a USING option was given, use the expression as
* transformed by transformAlterTableStmt, else just take the old
* value and try to coerce it. We do this first so that type
* incompatibility can be detected before we waste effort, and because
* we need the expression to be parsed against the original table row
* type.
*/
if (!transform)
{
@ -8221,8 +8222,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* specified in the policy's USING or WITH CHECK qual
* expressions. It might be possible to rewrite and recheck
* the policy expression, but punt for now. It's certainly
* easy enough to remove and recreate the policy; still,
* FIXME someday.
* easy enough to remove and recreate the policy; still, FIXME
* someday.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),


@ -87,7 +87,8 @@ CreateRole(CreateRoleStmt *stmt)
bool createdb = false; /* Can the user create databases? */
bool canlogin = false; /* Can this user login? */
bool isreplication = false; /* Is this a replication role? */
bool bypassrls = false; /* Is this a row security enabled role? */
bool bypassrls = false; /* Is this a row security enabled
* role? */
int connlimit = -1; /* maximum connections allowed */
List *addroleto = NIL; /* roles to make this a member of */
List *rolemembers = NIL; /* roles to be members of this role */


@ -530,8 +530,8 @@ vacuum_set_xid_limits(Relation rel,
/*
* Compute the multixact age for which freezing is urgent. This is
* normally autovacuum_multixact_freeze_max_age, but may be less if we
* are short of multixact member space.
* normally autovacuum_multixact_freeze_max_age, but may be less if we are
* short of multixact member space.
*/
effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
@ -1134,9 +1134,8 @@ vac_truncate_clog(TransactionId frozenXID,
return;
/*
* Truncate CLOG and CommitTs to the oldest computed value.
* Note we don't truncate multixacts; that will be done by the next
* checkpoint.
* Truncate CLOG and CommitTs to the oldest computed value. Note we don't
* truncate multixacts; that will be done by the next checkpoint.
*/
TruncateCLOG(frozenXID);
TruncateCommitTs(frozenXID, true);


@ -337,6 +337,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
params->log_min_duration))
{
StringInfoData buf;
TimestampDifference(starttime, endtime, &secs, &usecs);
read_rate = 0;


@ -405,10 +405,10 @@ ExecSupportsMarkRestore(Path *pathnode)
* that does, we presently come here only for ResultPath nodes,
* which represent Result plans without a child plan. So there is
* nothing to recurse to and we can just say "false". (This means
* that Result's support for mark/restore is in fact dead code.
* We keep it since it's not much code, and someday the planner
* might be smart enough to use it. That would require making
* this function smarter too, of course.)
* that Result's support for mark/restore is in fact dead code. We
* keep it since it's not much code, and someday the planner might
* be smart enough to use it. That would require making this
* function smarter too, of course.)
*/
Assert(IsA(pathnode, ResultPath));
return false;


@ -838,8 +838,8 @@ retry:
* Ordinarily, at this point the search should have found the originally
* inserted tuple (if any), unless we exited the loop early because of
* conflict. However, it is possible to define exclusion constraints for
* which that wouldn't be true --- for instance, if the operator is <>.
* So we no longer complain if found_self is still false.
* which that wouldn't be true --- for instance, if the operator is <>. So
* we no longer complain if found_self is still false.
*/
econtext->ecxt_scantuple = save_scantuple;


@ -153,16 +153,16 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
* If the transaction is read-only, we need to check if any writes are
* planned to non-temporary tables. EXPLAIN is considered read-only.
*
* Don't allow writes in parallel mode. Supporting UPDATE and DELETE would
* require (a) storing the combocid hash in shared memory, rather than
* synchronizing it just once at the start of parallelism, and (b) an
* Don't allow writes in parallel mode. Supporting UPDATE and DELETE
* would require (a) storing the combocid hash in shared memory, rather
* than synchronizing it just once at the start of parallelism, and (b) an
* alternative to heap_update()'s reliance on xmax for mutual exclusion.
* INSERT may have no such troubles, but we forbid it to simplify the
* checks.
*
* We have lower-level defenses in CommandCounterIncrement and elsewhere
* against performing unsafe operations in parallel mode, but this gives
* a more user-friendly error message.
* against performing unsafe operations in parallel mode, but this gives a
* more user-friendly error message.
*/
if ((XactReadOnly || IsInParallelMode()) &&
!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
@ -695,10 +695,9 @@ ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
int col = -1;
/*
* When the query doesn't explicitly update any columns, allow the
* query if we have permission on any column of the rel. This is
* to handle SELECT FOR UPDATE as well as possible corner cases in
* UPDATE.
* When the query doesn't explicitly update any columns, allow the query
* if we have permission on any column of the rel. This is to handle
* SELECT FOR UPDATE as well as possible corner cases in UPDATE.
*/
if (bms_is_empty(modifiedCols))
{
@ -742,8 +741,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
ListCell *l;
/*
* Fail if write permissions are requested in parallel mode for
* table (temp or non-temp), otherwise fail for any non-temp table.
* Fail if write permissions are requested in parallel mode for table
* (temp or non-temp), otherwise fail for any non-temp table.
*/
foreach(l, plannedstmt->rtable)
{
@ -1773,11 +1772,11 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
/*
* WITH CHECK OPTION checks are intended to ensure that the new tuple
* is visible (in the case of a view) or that it passes the
* 'with-check' policy (in the case of row security).
* If the qual evaluates to NULL or FALSE, then the new tuple won't be
* included in the view or doesn't pass the 'with-check' policy for the
* table. We need ExecQual to return FALSE for NULL to handle the view
* case (the opposite of what we do above for CHECK constraints).
* 'with-check' policy (in the case of row security). If the qual
* evaluates to NULL or FALSE, then the new tuple won't be included in
* the view or doesn't pass the 'with-check' policy for the table. We
* need ExecQual to return FALSE for NULL to handle the view case (the
* opposite of what we do above for CHECK constraints).
*/
if (!ExecQual((List *) wcoExpr, econtext, false))
{
@ -1789,12 +1788,13 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
switch (wco->kind)
{
/*
* For WITH CHECK OPTIONs coming from views, we might be able to
* provide the details on the row, depending on the permissions
* on the relation (that is, if the user could view it directly
* anyway). For RLS violations, we don't include the data since
* we don't know if the user should be able to view the tuple,
* as that depends on the USING policy.
* For WITH CHECK OPTIONs coming from views, we might be
* able to provide the details on the row, depending on
* the permissions on the relation (that is, if the user
* could view it directly anyway). For RLS violations, we
* don't include the data since we don't know if the user
* should be able to view the tuple, as that depends on
* the USING policy.
*/
case WCO_VIEW_CHECK:
insertedCols = GetInsertedColumns(resultRelInfo, estate);
@ -1915,8 +1915,8 @@ ExecBuildSlotValueDescription(Oid reloid,
{
/*
* No table-level SELECT, so need to make sure they either have
* SELECT rights on the column or that they have provided the
* data for the column. If not, omit this column from the error
* SELECT rights on the column or that they have provided the data
* for the column. If not, omit this column from the error
* message.
*/
aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
@ -2313,9 +2313,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
* doing so would require changing heap_update and
* heap_delete to not complain about updating "invisible"
* tuples, which seems pretty scary (heap_lock_tuple will
* not complain, but few callers expect HeapTupleInvisible,
* and we're not one of them). So for now, treat the tuple
* as deleted and do not process.
* not complain, but few callers expect
* HeapTupleInvisible, and we're not one of them). So for
* now, treat the tuple as deleted and do not process.
*/
ReleaseBuffer(buffer);
return NULL;


@ -441,8 +441,8 @@ initialize_phase(AggState *aggstate, int newphase)
}
/*
* If this isn't the last phase, we need to sort appropriately for the next
* phase in sequence.
* If this isn't the last phase, we need to sort appropriately for the
* next phase in sequence.
*/
if (newphase < aggstate->numphases - 1)
{
@ -540,9 +540,8 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
/*
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we must copy
* it (into the aggcontext) since we will pfree the transValue
* later.
* Note that when the initial value is pass-by-ref, we must copy it (into
* the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@ -560,11 +559,11 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
* If the initial value for the transition state doesn't exist in
* the pg_aggregate table then we will let the first non-NULL
* value returned from the outer procNode become the initial
* value. (This is useful for aggregates like max() and min().)
* The noTransValue flag signals that we still need to do this.
* If the initial value for the transition state doesn't exist in the
* pg_aggregate table then we will let the first non-NULL value returned
* from the outer procNode become the initial value. (This is useful for
* aggregates like max() and min().) The noTransValue flag signals that we
* still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@ -1225,8 +1224,7 @@ project_aggregates(AggState *aggstate)
ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
/*
* Check the qual (HAVING clause); if the group does not match, ignore
* it.
* Check the qual (HAVING clause); if the group does not match, ignore it.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
@ -1521,8 +1519,8 @@ agg_retrieve_direct(AggState *aggstate)
/*
* get state info from node
*
* econtext is the per-output-tuple expression context
* tmpcontext is the per-input-tuple expression context
* econtext is the per-output-tuple expression context tmpcontext is the
* per-input-tuple expression context
*/
econtext = aggstate->ss.ps.ps_ExprContext;
tmpcontext = aggstate->tmpcontext;
@ -1729,7 +1727,8 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot,
InvalidBuffer,
true);
aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
aggstate->grp_firstTuple = NULL; /* don't keep two
* pointers */
/* set up for first advance_aggregates call */
tmpcontext->ecxt_outertuple = firstSlot;
@ -1787,8 +1786,8 @@ agg_retrieve_direct(AggState *aggstate)
* Use the representative input tuple for any references to
* non-aggregated input columns in aggregate direct args, the node
* qual, and the tlist. (If we are not grouping, and there are no
* input rows at all, we will come here with an empty firstSlot ...
* but if not grouping, there can't be any references to
* input rows at all, we will come here with an empty firstSlot
* ... but if not grouping, there can't be any references to
* non-aggregated input columns, so no problem.)
*/
econtext->ecxt_outertuple = firstSlot;
@ -1803,8 +1802,8 @@ agg_retrieve_direct(AggState *aggstate)
finalize_aggregates(aggstate, peragg, pergroup, currentSet);
/*
* If there's no row to project right now, we must continue rather than
* returning a null since there might be more groups.
* If there's no row to project right now, we must continue rather
* than returning a null since there might be more groups.
*/
result = project_aggregates(aggstate);
if (result)
@ -2777,6 +2776,7 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
{
AggState *aggstate = ((AggState *) fcinfo->context);
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
*aggcontext = cxt->ecxt_per_tuple_memory;
}
return AGG_CONTEXT_AGGREGATE;


@ -500,8 +500,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
/*
* If there's not enough space to store the projected number of tuples
* and the required bucket headers, we will need multiple batches.
* If there's not enough space to store the projected number of tuples and
* the required bucket headers, we will need multiple batches.
*/
if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
{
@ -512,8 +512,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
long bucket_size;
/*
* Estimate the number of buckets we'll want to have when work_mem
* is entirely full. Each bucket will contain a bucket pointer plus
* Estimate the number of buckets we'll want to have when work_mem is
* entirely full. Each bucket will contain a bucket pointer plus
* NTUP_PER_BUCKET tuples, whose projected size already includes
* overhead for the hash code, pointer to the next tuple, etc.
*/
@ -527,9 +527,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
* Buckets are simple pointers to hashjoin tuples, while tupsize
* includes the pointer, hash code, and MinimalTupleData. So buckets
* should never really exceed 25% of work_mem (even for
* NTUP_PER_BUCKET=1); except maybe for work_mem values that are
* not 2^N bytes, where we might get more because of doubling.
* So let's look for 50% here.
* NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
* 2^N bytes, where we might get more because of doubling. So let's
* look for 50% here.
*/
Assert(bucket_bytes <= hash_table_bytes / 2);
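
Reduced to its core, the sizing logic is: tuples plus bucket headers must fit the memory budget, each bucket holds about NTUP_PER_BUCKET tuples, and the batch count doubles until one batch fits. A much-simplified sketch of that loop (power-of-two rounding, skew buckets, and the pointer-array clamps are all omitted; assumes a sane nonzero budget):

#include <stddef.h>

#define NTUP_PER_BUCKET 1

/*
 * Sketch: double nbatch until one batch's tuples plus its bucket array fit
 * within the memory budget, then size the bucket array for that batch.
 */
static void
choose_hash_size(double ntuples, int tupsize, size_t hash_table_bytes,
                 int *nbuckets, int *nbatch)
{
    *nbatch = 1;
    for (;;)
    {
        double tuples_per_batch = ntuples / *nbatch;
        size_t inner_bytes = (size_t) (tuples_per_batch * tupsize);
        size_t nbkt = (size_t) (tuples_per_batch / NTUP_PER_BUCKET) + 1;
        size_t bucket_bytes = nbkt * sizeof(void *);

        if (inner_bytes + bucket_bytes <= hash_table_bytes)
        {
            *nbuckets = (int) nbkt;
            return;
        }
        *nbatch *= 2;           /* spill more tuples to later batches */
    }
}
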
@ -671,6 +671,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
while (oldchunks != NULL)
{
HashMemoryChunk nextchunk = oldchunks->next;
/* position within the buffer (up to oldchunks->used) */
size_t idx = 0;
@ -692,6 +693,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
/* keep tuple in memory - copy it into the new chunk */
HashJoinTuple copyTuple =
(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
memcpy(copyTuple, hashTuple, hashTupleSize);
/* and add it back to the appropriate bucket */
@ -756,8 +758,8 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
return;
/*
* We already know the optimal number of buckets, so let's just
* compute the log2_nbuckets for it.
* We already know the optimal number of buckets, so let's just compute
* the log2_nbuckets for it.
*/
hashtable->nbuckets = hashtable->nbuckets_optimal;
hashtable->log2_nbuckets = my_log2(hashtable->nbuckets_optimal);
@ -771,10 +773,10 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
#endif
/*
* Just reallocate the proper number of buckets - we don't need to
* walk through them - we can walk the dense-allocated chunks
* (just like in ExecHashIncreaseNumBatches, but without all the
* copying into new chunks)
* Just reallocate the proper number of buckets - we don't need to walk
* through them - we can walk the dense-allocated chunks (just like in
* ExecHashIncreaseNumBatches, but without all the copying into new
* chunks)
*/
hashtable->buckets =
(HashJoinTuple *) repalloc(hashtable->buckets,
@ -787,6 +789,7 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
{
/* process all tuples stored in this chunk */
size_t idx = 0;
while (idx < chunk->used)
{
HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
@ -869,7 +872,8 @@ ExecHashTableInsert(HashJoinTable hashtable,
/*
* Increase the (optimal) number of buckets if we just exceeded the
* NTUP_PER_BUCKET threshold, but only when there's still a single batch.
* NTUP_PER_BUCKET threshold, but only when there's still a single
* batch.
*/
if ((hashtable->nbatch == 1) &&
(hashtable->nbuckets_optimal <= INT_MAX / 2) && /* overflow protection */
@ -1663,8 +1667,8 @@ dense_alloc(HashJoinTable hashtable, Size size)
}
/*
* See if we have enough space for it in the current chunk (if any).
* If not, allocate a fresh chunk.
* See if we have enough space for it in the current chunk (if any). If
* not, allocate a fresh chunk.
*/
if ((hashtable->chunks == NULL) ||
(hashtable->chunks->maxlen - hashtable->chunks->used) < size)


@ -106,8 +106,8 @@ IndexOnlyNext(IndexOnlyScanState *node)
* away, because the tuple is still visible until the deleting
* transaction commits or the statement ends (if it's our
* transaction). In either case, the lock on the VM buffer will have
* been released (acting as a write barrier) after clearing the
* bit. And for us to have a snapshot that includes the deleting
* been released (acting as a write barrier) after clearing the bit.
* And for us to have a snapshot that includes the deleting
* transaction (making the tuple invisible), we must have acquired
* ProcArrayLock after that time, acting as a read barrier.
*


@ -288,9 +288,9 @@ next_indextuple:
* Can we return this tuple immediately, or does it need to be pushed
* to the reorder queue? If the ORDER BY expression values returned
* by the index were inaccurate, we can't return it yet, because the
* next tuple from the index might need to come before this one.
* Also, we can't return it yet if there are any smaller tuples in the
* queue already.
* next tuple from the index might need to come before this one. Also,
* we can't return it yet if there are any smaller tuples in the queue
* already.
*/
if (!was_exact || (topmost && cmp_orderbyvals(lastfetched_vals,
lastfetched_nulls,


@ -196,11 +196,12 @@ lnext:
* case, so as to avoid the "Halloween problem" of repeated
* update attempts. In the latter case it might be sensible
* to fetch the updated tuple instead, but doing so would
* require changing heap_update and heap_delete to not complain
* about updating "invisible" tuples, which seems pretty scary
* (heap_lock_tuple will not complain, but few callers expect
* HeapTupleInvisible, and we're not one of them). So for now,
* treat the tuple as deleted and do not process.
* require changing heap_update and heap_delete to not
* complain about updating "invisible" tuples, which seems
* pretty scary (heap_lock_tuple will not complain, but few
* callers expect HeapTupleInvisible, and we're not one of
* them). So for now, treat the tuple as deleted and do not
* process.
*/
goto lnext;


@ -139,10 +139,10 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
/*
* It isn't feasible to perform abbreviated key conversion, since
* tuples are pulled into mergestate's binary heap as needed. It would
* likely be counter-productive to convert tuples into an abbreviated
* representation as they're pulled up, so opt out of that additional
* optimization entirely.
* tuples are pulled into mergestate's binary heap as needed. It
* would likely be counter-productive to convert tuples into an
* abbreviated representation as they're pulled up, so opt out of that
* additional optimization entirely.
*/
sortKey->abbreviate = false;


@ -232,8 +232,8 @@ MJExamineQuals(List *mergeclauses,
/*
* sortsupport routine must know if abbreviation optimization is
* applicable in principle. It is never applicable for merge joins
* because there is no convenient opportunity to convert to alternative
* representation.
* because there is no convenient opportunity to convert to
* alternative representation.
*/
clause->ssup.abbreviate = false;


@ -321,8 +321,8 @@ ExecInsert(ModifyTableState *mtstate,
/*
* Check any RLS INSERT WITH CHECK policies
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind
* we are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
@ -383,9 +383,9 @@ ExecInsert(ModifyTableState *mtstate,
else
{
/*
* In case of ON CONFLICT DO NOTHING, do nothing.
* However, verify that the tuple is visible to the
* executor's MVCC snapshot at higher isolation levels.
* In case of ON CONFLICT DO NOTHING, do nothing. However,
* verify that the tuple is visible to the executor's MVCC
* snapshot at higher isolation levels.
*/
Assert(onconflict == ONCONFLICT_NOTHING);
ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
@ -475,17 +475,16 @@ ExecInsert(ModifyTableState *mtstate,
list_free(recheckIndexes);
/*
* Check any WITH CHECK OPTION constraints from parent views. We
* are required to do this after testing all constraints and
* uniqueness violations per the SQL spec, so we do it after actually
* inserting the record into the heap and all indexes.
* Check any WITH CHECK OPTION constraints from parent views. We are
* required to do this after testing all constraints and uniqueness
* violations per the SQL spec, so we do it after actually inserting the
* record into the heap and all indexes.
*
* ExecWithCheckOptions will elog(ERROR) if a violation is found, so
* the tuple will never be seen, if it violates the WITH CHECK
* OPTION.
* ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
* tuple will never be seen, if it violates the WITH CHECK OPTION.
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind we
* are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@ -860,8 +859,8 @@ ExecUpdate(ItemPointer tupleid,
* triggers then trigger.c will have done heap_lock_tuple to lock the
* correct tuple, so there's no need to do them again.)
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind
* we are looking for at this point.
*/
lreplace:;
if (resultRelInfo->ri_WithCheckOptions != NIL)
@ -990,13 +989,13 @@ lreplace:;
list_free(recheckIndexes);
/*
* Check any WITH CHECK OPTION constraints from parent views. We
* are required to do this after testing all constraints and
* uniqueness violations per the SQL spec, so we do it after actually
* updating the record in the heap and all indexes.
* Check any WITH CHECK OPTION constraints from parent views. We are
* required to do this after testing all constraints and uniqueness
* violations per the SQL spec, so we do it after actually updating the
* record in the heap and all indexes.
*
* ExecWithCheckOptions() will skip any WCOs which are not of
* the kind we are looking for at this point.
* ExecWithCheckOptions() will skip any WCOs which are not of the kind we
* are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@ -1143,9 +1142,9 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Make tuple and any needed join variables available to ExecQual and
* ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
* the target's existing tuple is installed in the scantuple. EXCLUDED has
* been made to reference INNER_VAR in setrefs.c, but there is no other
* redirection.
* the target's existing tuple is installed in the scantuple. EXCLUDED
* has been made to reference INNER_VAR in setrefs.c, but there is no
* other redirection.
*/
econtext->ecxt_scantuple = mtstate->mt_existing;
econtext->ecxt_innertuple = excludedSlot;


@ -60,7 +60,8 @@ SampleNext(SampleScanState *node)
if (tuple)
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
tsdesc->heapScan->rs_cbuf, /* buffer associated with this tuple */
tsdesc->heapScan->rs_cbuf, /* buffer associated
* with this tuple */
false); /* don't pfree this pointer */
else
ExecClearTuple(slot);


@ -1344,11 +1344,11 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
* If told to be read-only, or in parallel mode, verify that this query
* is in fact read-only. This can't be done earlier because we need to
* look at the finished, planned queries. (In particular, we don't want
* to do it between GetCachedPlan and PortalDefineQuery, because throwing
* an error between those steps would result in leaking our plancache
* If told to be read-only, or in parallel mode, verify that this query is
* in fact read-only. This can't be done earlier because we need to look
* at the finished, planned queries. (In particular, we don't want to do
* it between GetCachedPlan and PortalDefineQuery, because throwing an
* error between those steps would result in leaking our plancache
* refcount.)
*/
if (read_only || IsInParallelMode())


@ -571,10 +571,9 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
int err;
/*
* If SSL renegotiations are enabled and we're getting close to the
* limit, start one now; but avoid it if there's one already in
* progress. Request the renegotiation 1kB before the limit has
* actually expired.
* If SSL renegotiations are enabled and we're getting close to the limit,
* start one now; but avoid it if there's one already in progress.
* Request the renegotiation 1kB before the limit has actually expired.
*/
if (ssl_renegotiation_limit && !in_ssl_renegotiation &&
port->count > (ssl_renegotiation_limit - 1) * 1024L)
@ -583,12 +582,12 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
/*
* The way we determine that a renegotiation has completed is by
* observing OpenSSL's internal renegotiation counter. Make sure
* we start out at zero, and assume that the renegotiation is
* complete when the counter advances.
* observing OpenSSL's internal renegotiation counter. Make sure we
* start out at zero, and assume that the renegotiation is complete
* when the counter advances.
*
* OpenSSL provides SSL_renegotiation_pending(), but this doesn't
* seem to work in testing.
* OpenSSL provides SSL_renegotiation_pending(), but this doesn't seem
* to work in testing.
*/
SSL_clear_num_renegotiations(port->ssl);
@ -658,9 +657,9 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
}
/*
* if renegotiation is still ongoing, and we've gone beyond the
* limit, kill the connection now -- continuing to use it can be
* considered a security problem.
* if renegotiation is still ongoing, and we've gone beyond the limit,
* kill the connection now -- continuing to use it can be considered a
* security problem.
*/
if (in_ssl_renegotiation &&
port->count > ssl_renegotiation_limit * 1024L)
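
The two thresholds used throughout this function fit in a few lines; the sketch below is standalone, and the names and return convention are illustrative only:

#include <stdbool.h>

/* 0 = nothing to do, 1 = request a renegotiation, 2 = kill the connection */
static int
renegotiation_action(long limit_kb, long bytes_out, bool in_renegotiation)
{
    if (limit_kb == 0)
        return 0;                       /* renegotiation disabled */
    if (in_renegotiation && bytes_out > limit_kb * 1024L)
        return 2;       /* still ongoing past the limit: security problem */
    if (!in_renegotiation && bytes_out > (limit_kb - 1) * 1024L)
        return 1;       /* request it 1kB before the limit expires */
    return 0;
}
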


@ -162,8 +162,8 @@ retry:
/*
* We'll retry the read. Most likely it will return immediately
* because there's still no data available, and we'll wait
* for the socket to become ready again.
* because there's still no data available, and we'll wait for the
* socket to become ready again.
*/
}
goto retry;
@ -241,8 +241,8 @@ retry:
/*
* We'll retry the write. Most likely it will return immediately
* because there's still no data available, and we'll wait
* for the socket to become ready again.
* because there's still no data available, and we'll wait for the
* socket to become ready again.
*/
}
goto retry;
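
Both hunks describe the same wait-then-retry idiom for non-blocking sockets. A hedged standalone sketch (error handling trimmed, the helper name invented):

#include <errno.h>
#include <unistd.h>
#include <sys/select.h>

static ssize_t
read_with_retry(int fd, void *buf, size_t len)
{
    for (;;)
    {
        ssize_t n = read(fd, buf, len);

        if (n >= 0)
            return n;
        if (errno != EINTR && errno != EAGAIN && errno != EWOULDBLOCK)
            return -1;      /* hard error: report to caller */

        /* Most likely the retry would again find no data, so first wait
         * for the socket to become ready, then go around the loop. */
        fd_set rfds;

        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);
        if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0 && errno != EINTR)
            return -1;
    }
}
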


@ -1382,8 +1382,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
* situations and is generally considered bad practice. We keep the
* capability around for backwards compatibility, but we might want to
* remove it at some point in the future. Users who still need to strip
* the realm off would be better served by using an appropriate regex in
* a pg_ident.conf mapping.
* the realm off would be better served by using an appropriate regex in a
* pg_ident.conf mapping.
*/
if (hbaline->auth_method == uaGSS ||
hbaline->auth_method == uaSSPI)


@ -111,13 +111,12 @@ mq_putmessage(char msgtype, const char *s, size_t len)
shm_mq_result result;
/*
* If we're sending a message, and we have to wait because the
* queue is full, and then we get interrupted, and that interrupt
* results in trying to send another message, we respond by detaching
* the queue. There's no way to return to the original context, but
* even if there were, just queueing the message would amount to
* indefinitely postponing the response to the interrupt. So we do
* this instead.
* If we're sending a message, and we have to wait because the queue is
* full, and then we get interrupted, and that interrupt results in trying
* to send another message, we respond by detaching the queue. There's no
* way to return to the original context, but even if there were, just
* queueing the message would amount to indefinitely postponing the
* response to the interrupt. So we do this instead.
*/
if (pq_mq_busy)
{
@ -166,10 +165,10 @@ mq_putmessage_noblock(char msgtype, const char *s, size_t len)
{
/*
* While the shm_mq machinery does support sending a message in
* non-blocking mode, there's currently no way to begin sending
* a message that doesn't also commit us to completing the
* transmission. This could be improved in the future, but for now
* we don't need it.
* non-blocking mode, there's currently no way to begin sending a
* message that doesn't also commit us to completing the transmission.
* This could be improved in the future, but for now we don't need it.
*/
elog(ERROR, "not currently supported");
}


@ -672,17 +672,17 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
else if (query->groupingSets)
{
/*
* If we have grouping sets with expressions, we probably
* don't have uniqueness and analysis would be hard. Punt.
* If we have grouping sets with expressions, we probably don't have
* uniqueness and analysis would be hard. Punt.
*/
if (query->groupClause)
return false;
/*
* If we have no groupClause (therefore no grouping expressions),
* we might have one or many empty grouping sets. If there's just
* one, then we're returning only one row and are certainly unique.
* But otherwise, we know we're certainly not unique.
* If we have no groupClause (therefore no grouping expressions), we
* might have one or many empty grouping sets. If there's just one,
* then we're returning only one row and are certainly unique. But
* otherwise, we know we're certainly not unique.
*/
if (list_length(query->groupingSets) == 1 &&
((GroupingSet *) linitial(query->groupingSets))->kind == GROUPING_SET_EMPTY)


@ -865,13 +865,14 @@ inheritance_planner(PlannerInfo *root)
*
* Note that any RTEs with security barrier quals will be turned into
* subqueries during planning, and so we must create copies of them too,
* except where they are target relations, which will each only be used
* in a single plan.
* except where they are target relations, which will each only be used in
* a single plan.
*/
resultRTindexes = bms_add_member(resultRTindexes, parentRTindex);
foreach(lc, root->append_rel_list)
{
AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
if (appinfo->parent_relid == parentRTindex)
resultRTindexes = bms_add_member(resultRTindexes,
appinfo->child_relid);
@ -1299,6 +1300,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc, parse->groupClause)
{
SortGroupClause *gc = lfirst(lc);
if (gc->tleSortGroupRef > maxref)
maxref = gc->tleSortGroupRef;
}
@ -1333,6 +1335,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc, groupclause)
{
SortGroupClause *gc = lfirst(lc);
tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
}
@ -1810,6 +1813,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc, parse->groupClause)
{
SortGroupClause *gc = lfirst(lc);
grouping_map[gc->tleSortGroupRef] = groupColIdx[i++];
}
@ -1842,9 +1846,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
else if (parse->hasAggs || (parse->groupingSets && parse->groupClause))
{
/*
* Output is in sorted order by group_pathkeys if, and only if,
* there is a single rollup operation on a non-empty list of
* grouping expressions.
* Output is in sorted order by group_pathkeys if, and only
* if, there is a single rollup operation on a non-empty list
* of grouping expressions.
*/
if (list_length(rollup_groupclauses) == 1
&& list_length(linitial(rollup_groupclauses)) > 0)
@ -1864,8 +1868,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan);
/*
* these are destroyed by build_grouping_chain, so make sure we
* don't try and touch them again
* these are destroyed by build_grouping_chain, so make sure
* we don't try and touch them again
*/
rollup_groupclauses = NIL;
rollup_lists = NIL;
@ -1904,20 +1908,20 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
int nrows = list_length(parse->groupingSets);
/*
* No aggregates, and no GROUP BY, but we have a HAVING qual or
* grouping sets (which by elimination of cases above must
* No aggregates, and no GROUP BY, but we have a HAVING qual
* or grouping sets (which by elimination of cases above must
* consist solely of empty grouping sets, since otherwise
* groupClause will be non-empty).
*
* This is a degenerate case in which we are supposed to emit
* either 0 or 1 row for each grouping set depending on whether
* HAVING succeeds. Furthermore, there cannot be any variables
* in either HAVING or the targetlist, so we actually do not
* need the FROM table at all! We can just throw away the
* plan-so-far and generate a Result node. This is a
* sufficiently unusual corner case that it's not worth
* contorting the structure of this routine to avoid having to
* generate the plan in the first place.
* either 0 or 1 row for each grouping set depending on
* whether HAVING succeeds. Furthermore, there cannot be any
* variables in either HAVING or the targetlist, so we
* actually do not need the FROM table at all! We can just
* throw away the plan-so-far and generate a Result node.
* This is a sufficiently unusual corner case that it's not
* worth contorting the structure of this routine to avoid
* having to generate the plan in the first place.
*/
result_plan = (Plan *) make_result(root,
tlist,
@ -2279,6 +2283,7 @@ remap_groupColIdx(PlannerInfo *root, List *groupClause)
foreach(lc, groupClause)
{
SortGroupClause *clause = lfirst(lc);
new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
}
@ -2366,8 +2371,8 @@ build_grouping_chain(PlannerInfo *root,
/*
* sort_plan includes the cost of result_plan over again, which is not
* what we want (since it's not actually running that plan). So correct
* the cost figures.
* what we want (since it's not actually running that plan). So
* correct the cost figures.
*/
sort_plan->startup_cost -= result_plan->total_cost;
@ -2716,6 +2721,7 @@ select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
switch (strength)
{
case LCS_NONE:
/*
* We don't need a tuple lock, only the ability to re-fetch
* the row. Regular tables support ROW_MARK_REFERENCE, but if
@ -3152,21 +3158,21 @@ extract_rollup_sets(List *groupingSets)
return list_make1(groupingSets);
/*
* We don't strictly need to remove duplicate sets here, but if we
* don't, they tend to become scattered through the result, which is
* a bit confusing (and irritating if we ever decide to optimize them
* out). So we remove them here and add them back after.
* We don't strictly need to remove duplicate sets here, but if we don't,
* they tend to become scattered through the result, which is a bit
* confusing (and irritating if we ever decide to optimize them out). So
* we remove them here and add them back after.
*
* For each non-duplicate set, we fill in the following:
*
* orig_sets[i] = list of the original set lists
* set_masks[i] = bitmapset for testing inclusion
* adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
* orig_sets[i] = list of the original set lists set_masks[i] = bitmapset
* for testing inclusion adjacency[i] = array [n, v1, v2, ... vn] of
* adjacency indices
*
* chains[i] will be the result group this set is assigned to.
*
* We index all of these from 1 rather than 0 because it is convenient
* to leave 0 free for the NIL node in the graph algorithm.
* We index all of these from 1 rather than 0 because it is convenient to
* leave 0 free for the NIL node in the graph algorithm.
*/
orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
@ -3193,6 +3199,7 @@ extract_rollup_sets(List *groupingSets)
if (j_size == list_length(candidate))
{
int k;
for (k = j; k < i; ++k)
{
if (bms_equal(set_masks[k], candidate_set))
@ -3343,6 +3350,7 @@ reorder_grouping_sets(List *groupingsets, List *sortclause)
{
SortGroupClause *sc = list_nth(sortclause, list_length(previous));
int ref = sc->tleSortGroupRef;
if (list_member_int(new_elems, ref))
{
previous = lappend_int(previous, ref);


@ -1475,8 +1475,8 @@ contain_leaked_vars_walker(Node *node, void *context)
ListCell *rarg;
/*
* Check the comparison function and arguments passed to it for
* each pair of row elements.
* Check the comparison function and arguments passed to it
* for each pair of row elements.
*/
forthree(opid, rcexpr->opnos,
larg, rcexpr->largs,


@ -427,6 +427,7 @@ List *
infer_arbiter_indexes(PlannerInfo *root)
{
OnConflictExpr *onconflict = root->parse->onConflict;
/* Iteration state */
Relation relation;
Oid relationObjectId;
@ -548,8 +549,8 @@ infer_arbiter_indexes(PlannerInfo *root)
goto next;
/*
* Note that we do not perform a check against indcheckxmin (like
* e.g. get_relation_info()) here to eliminate candidates, because
* Note that we do not perform a check against indcheckxmin (like e.g.
* get_relation_info()) here to eliminate candidates, because
* uniqueness checking only cares about the most recently committed
* tuple versions.
*/


@ -578,12 +578,13 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
/* we do NOT descend into the contained expression */
return false;
case PVC_RECURSE_AGGREGATES:
/*
* we do NOT descend into the contained expression,
* even if the caller asked for it, because we never
* actually evaluate it - the result is driven entirely
* off the associated GROUP BY clause, so we never need
* to extract the actual Vars here.
* we do NOT descend into the contained expression, even if
* the caller asked for it, because we never actually evaluate
* it - the result is driven entirely off the associated GROUP
* BY clause, so we never need to extract the actual Vars
* here.
*/
return false;
}


@ -335,7 +335,11 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
Assert(false); /* can't happen */
break;
case EXPR_KIND_OTHER:
/* Accept aggregate/grouping here; caller must throw error if wanted */
/*
* Accept aggregate/grouping here; caller must throw error if
* wanted
*/
break;
case EXPR_KIND_JOIN_ON:
case EXPR_KIND_JOIN_USING:
@ -348,7 +352,11 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
case EXPR_KIND_FROM_SUBSELECT:
/* Should only be possible in a LATERAL subquery */
Assert(pstate->p_lateral_active);
/* Aggregate/grouping scope rules make it worth being explicit here */
/*
* Aggregate/grouping scope rules make it worth being explicit
* here
*/
if (isAgg)
err = _("aggregate functions are not allowed in FROM clause of their own query level");
else
@ -985,9 +993,9 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* If there was only one grouping set in the expansion, AND if the
* groupClause is non-empty (meaning that the grouping set is not empty
* either), then we can ditch the grouping set and pretend we just had
* a normal GROUP BY.
* groupClause is non-empty (meaning that the grouping set is not
* empty either), then we can ditch the grouping set and pretend we
* just had a normal GROUP BY.
*/
if (list_length(gsets) == 1 && qry->groupClause)
qry->groupingSets = NIL;
@ -1012,8 +1020,8 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* Build a list of the acceptable GROUP BY expressions for use by
* check_ungrouped_columns().
*
* We get the TLE, not just the expr, because GROUPING wants to know
* the sortgroupref.
* We get the TLE, not just the expr, because GROUPING wants to know the
* sortgroupref.
*/
foreach(l, qry->groupClause)
{
@ -1052,13 +1060,14 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* scans. (Note we have to flatten aliases before this.)
*
* Track Vars that are included in all grouping sets separately in
* groupClauseCommonVars, since these are the only ones we can use to check
* for functional dependencies.
* groupClauseCommonVars, since these are the only ones we can use to
* check for functional dependencies.
*/
have_non_var_grouping = false;
foreach(l, groupClauses)
{
TargetEntry *tle = lfirst(l);
if (!IsA(tle->expr, Var))
{
have_non_var_grouping = true;
@ -1411,8 +1420,8 @@ finalize_grouping_exprs_walker(Node *node,
GroupingFunc *grp = (GroupingFunc *) node;
/*
* We only need to check GroupingFunc nodes at the exact level to which
* they belong, since they cannot mix levels in arguments.
* We only need to check GroupingFunc nodes at the exact level to
* which they belong, since they cannot mix levels in arguments.
*/
if ((int) grp->agglevelsup == context->sublevels_up)
@ -1627,6 +1636,7 @@ cmp_list_len_asc(const void *a, const void *b)
{
int la = list_length(*(List *const *) a);
int lb = list_length(*(List *const *) b);
return (la > lb) ? 1 : (la == lb) ? 0 : -1;
}
@ -1666,9 +1676,9 @@ expand_grouping_sets(List *groupingSets, int limit)
}
/*
* Do cartesian product between sublists of expanded_groups.
* While at it, remove any duplicate elements from individual
* grouping sets (we must NOT change the number of sets though)
* Do cartesian product between sublists of expanded_groups. While at it,
* remove any duplicate elements from individual grouping sets (we must
* NOT change the number of sets though)
*/
foreach(lc, (List *) linitial(expanded_groups))


@ -434,6 +434,7 @@ transformTableSampleEntry(ParseState *pstate, RangeTableSample *rv)
if (!rv->relation->schemaname)
{
Index levelsup;
cte = scanNameSpaceForCTE(pstate, rv->relation->relname, &levelsup);
}
@ -1776,6 +1777,7 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
case T_RowExpr:
{
RowExpr *r = (RowExpr *) expr;
if (r->row_format == COERCE_IMPLICIT_CAST)
return flatten_grouping_sets((Node *) r->args,
false, NULL);
@ -1792,7 +1794,8 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
/*
* at the top level, we skip over all empty grouping sets; the
* caller can supply the canonical GROUP BY () if nothing is left.
* caller can supply the canonical GROUP BY () if nothing is
* left.
*/
if (toplevel && gset->kind == GROUPING_SET_EMPTY)
@ -1806,9 +1809,9 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
}
/*
* At top level, keep the grouping set node; but if we're in a nested
* grouping set, then we need to concat the flattened result into the
* outer list if it's simply nested.
* At top level, keep the grouping set node; but if we're in a
* nested grouping set, then we need to concat the flattened
* result into the outer list if it's simply nested.
*/
if (toplevel || (gset->kind != GROUPING_SET_SETS))
@ -1826,6 +1829,7 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
foreach(l, (List *) expr)
{
Node *n = flatten_grouping_sets(lfirst(l), toplevel, hasGroupingSets);
if (n != (Node *) NIL)
{
if (IsA(n, List))
@ -1888,15 +1892,15 @@ transformGroupClauseExpr(List **flatresult, Bitmapset *seen_local,
* (Duplicates in grouping sets can affect the number of returned
* rows, so can't be dropped indiscriminately.)
*
* Since we don't care about anything except the sortgroupref,
* we can use a bitmapset rather than scanning lists.
* Since we don't care about anything except the sortgroupref, we can
* use a bitmapset rather than scanning lists.
*/
if (bms_is_member(tle->ressortgroupref, seen_local))
return 0;
/*
* If we're already in the flat clause list, we don't need
* to consider adding ourselves again.
* If we're already in the flat clause list, we don't need to consider
* adding ourselves again.
*/
found = targetIsInSortList(tle, InvalidOid, *flatresult);
if (found)
@ -1928,6 +1932,7 @@ transformGroupClauseExpr(List **flatresult, Bitmapset *seen_local,
if (sc->tleSortGroupRef == tle->ressortgroupref)
{
SortGroupClause *grpc = copyObject(sc);
if (!toplevel)
grpc->nulls_first = false;
*flatresult = lappend(*flatresult, grpc);
@ -1994,6 +1999,7 @@ transformGroupClauseList(List **flatresult,
exprKind,
useSQL99,
toplevel);
if (ref > 0)
{
seen_local = bms_add_member(seen_local, ref);
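
The bitmapset shortcut mentioned above (bms_is_member()/bms_add_member() in the real code) is a constant-time membership test on small integers. A standalone sketch with a plain 64-bit mask, which assumes sortgrouprefs below 64:

#include <stdbool.h>
#include <stdint.h>

/* Returns true if ref was already recorded; records it otherwise. */
static bool
seen_before(uint64_t *seen, unsigned ref)
{
    uint64_t bit = UINT64_C(1) << ref;  /* sketch assumes ref < 64 */

    if (*seen & bit)
        return true;
    *seen |= bit;
    return false;
}
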
@ -2140,18 +2146,18 @@ transformGroupClause(ParseState *pstate, List *grouplist, List **groupingSets,
Bitmapset *seen_local = NULL;
/*
* Recursively flatten implicit RowExprs. (Technically this is only
* needed for GROUP BY, per the syntax rules for grouping sets, but
* we do it anyway.)
* Recursively flatten implicit RowExprs. (Technically this is only needed
* for GROUP BY, per the syntax rules for grouping sets, but we do it
* anyway.)
*/
flat_grouplist = (List *) flatten_grouping_sets((Node *) grouplist,
true,
&hasGroupingSets);
/*
* If the list is now empty, but hasGroupingSets is true, it's because
* we elided redundant empty grouping sets. Restore a single empty
* grouping set to leave a canonical form: GROUP BY ()
* If the list is now empty, but hasGroupingSets is true, it's because we
* elided redundant empty grouping sets. Restore a single empty grouping
* set to leave a canonical form: GROUP BY ()
*/
if (flat_grouplist == NIL && hasGroupingSets)
@ -2795,9 +2801,8 @@ transformOnConflictArbiter(ParseState *pstate,
List *save_namespace;
/*
* While we process the arbiter expressions, accept only
* non-qualified references to the target table. Hide any other
* relations.
* While we process the arbiter expressions, accept only non-qualified
* references to the target table. Hide any other relations.
*/
save_namespace = pstate->p_namespace;
pstate->p_namespace = NIL;


@ -784,7 +784,8 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
TableSampleClause *tablesample;
List *fargs;
ListCell *larg;
int nargs, initnargs;
int nargs,
initnargs;
Oid init_arg_types[FUNC_MAX_ARGS];
/* Load the tablesample method */
@ -826,9 +827,9 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
Assert(initnargs >= 3);
/*
* First parameter is used to pass the SampleScanState, second is
* seed (REPEATABLE), skip the processing for them here, just assert
* that the types are correct.
* First parameter is used to pass the SampleScanState, second is seed
* (REPEATABLE), skip the processing for them here, just assert that the
* types are correct.
*/
Assert(procform->proargtypes.values[0] == INTERNALOID);
Assert(procform->proargtypes.values[1] == INT4OID);


@ -560,8 +560,8 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
return;
/*
* From this point on, we can ignore the distinction between the
* RTE-name distance and the column-name distance.
* From this point on, we can ignore the distinction between the RTE-name
* distance and the column-name distance.
*/
columndistance += fuzzy_rte_penalty;
@ -581,11 +581,11 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
else if (columndistance == fuzzystate->distance)
{
/*
* This match distance may equal a prior match within this same
* range table. When that happens, the prior match may also be
* given, but only if there are no more than two equally distant
* matches from the RTE (in turn, our caller will only accept
* two equally distant matches overall).
* This match distance may equal a prior match within this same range
* table. When that happens, the prior match may also be given, but
* only if there are no more than two equally distant matches from the
* RTE (in turn, our caller will only accept two equally distant
* matches overall).
*/
if (AttributeNumberIsValid(fuzzystate->second))
{
@ -606,9 +606,9 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
else if (fuzzystate->distance <= MAX_FUZZY_DISTANCE)
{
/*
* Record as provisional first match (this can occasionally
* occur because previous lowest distance was "too low a
* bar", rather than being associated with a real match)
* Record as provisional first match (this can occasionally occur
* because previous lowest distance was "too low a bar", rather
* than being associated with a real match)
*/
fuzzystate->rfirst = rte;
fuzzystate->first = attnum;
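
The first/second bookkeeping this function performs can be sketched standalone; the struct below is a simplified stand-in for the real FuzzyAttrMatchState, with -1 playing the role of InvalidAttrNumber:

typedef struct
{
    int distance;   /* smallest combined distance seen so far */
    int first;      /* best-matching attribute number, or -1 */
    int second;     /* second equally distant match, or -1 */
    int nties;      /* matches sharing the best distance */
} FuzzySketchState;

static void
fuzzy_update(FuzzySketchState *st, int attnum, int columndistance,
             int fuzzy_rte_penalty)
{
    /* From here on, RTE-name and column-name distances are merged. */
    columndistance += fuzzy_rte_penalty;

    if (columndistance < st->distance)
    {
        st->distance = columndistance;
        st->first = attnum;
        st->second = -1;
        st->nties = 1;
    }
    else if (columndistance == st->distance)
    {
        if (++st->nties == 2)
            st->second = attnum;            /* two ties: still reportable */
        else
            st->first = st->second = -1;    /* three or more: ambiguous */
    }
}
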
@ -3056,10 +3056,10 @@ errorMissingColumn(ParseState *pstate,
* Extract closest col string for best match, if any.
*
* Infer an exact match referenced despite not being visible from the fact
* that an attribute number was not present in state passed back -- this is
* what is reported when !closestfirst. There might also be an exact match
* that was qualified with an incorrect alias, in which case closestfirst
* will be set (so hint is the same as generic fuzzy case).
* that an attribute number was not present in state passed back -- this
* is what is reported when !closestfirst. There might also be an exact
* match that was qualified with an incorrect alias, in which case
* closestfirst will be set (so hint is the same as generic fuzzy case).
*/
if (state->rfirst && AttributeNumberIsValid(state->first))
closestfirst = strVal(list_nth(state->rfirst->eref->colnames,


@ -50,6 +50,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
"size mismatch of atomic_flag vs slock_t");
#ifndef HAVE_SPINLOCKS
/*
* NB: If we're using semaphore based TAS emulation, be careful to use a
* separate set of semaphores. Otherwise we'd get in trouble if an atomic
@ -99,6 +100,7 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
bool ret;
/*
* Do atomic op under a spinlock. It might look like we could just skip
* the cmpxchg if the lock isn't available, but that'd just emulate a
@ -125,6 +127,7 @@ uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
uint32 oldval;
SpinLockAcquire((slock_t *) &ptr->sema);
oldval = ptr->value;
ptr->value += add_;
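
The fallback above performs its read-modify-write under the embedded spinlock. A standalone model of the same idea, with a pthread mutex standing in for slock_t:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    pthread_mutex_t sema;   /* stand-in for the embedded slock_t */
    uint32_t value;
} emulated_atomic_u32;

static uint32_t
fetch_add_u32(emulated_atomic_u32 *ptr, int32_t add_)
{
    pthread_mutex_lock(&ptr->sema);
    uint32_t oldval = ptr->value;   /* the API returns the pre-add value */

    ptr->value += add_;
    pthread_mutex_unlock(&ptr->sema);
    return oldval;
}

int main(void)
{
    emulated_atomic_u32 a = {PTHREAD_MUTEX_INITIALIZER, 40};
    uint32_t old = fetch_add_u32(&a, 2);

    printf("old=%u new=%u\n", old, a.value);    /* old=40 new=42 */
    return 0;
}
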


@ -153,6 +153,7 @@ PGSemaphoreLock(PGSemaphore sema)
done = true;
break;
case WAIT_IO_COMPLETION:
/*
* The system interrupted the wait to execute an I/O
* completion routine or asynchronous procedure call in this


@ -1915,8 +1915,8 @@ do_autovacuum(void)
/*
* Compute the multixact age for which freezing is urgent. This is
* normally autovacuum_multixact_freeze_max_age, but may be less if we
* are short of multixact member space.
* normally autovacuum_multixact_freeze_max_age, but may be less if we are
* short of multixact member space.
*/
effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();


@ -254,11 +254,11 @@ BackgroundWorkerStateChange(void)
}
/*
* If the worker is marked for termination, we don't need to add it
* to the registered workers list; we can just free the slot.
* However, if bgw_notify_pid is set, the process that registered the
* worker may need to know that we've processed the terminate request,
* so be sure to signal it.
* If the worker is marked for termination, we don't need to add it to
* the registered workers list; we can just free the slot. However, if
* bgw_notify_pid is set, the process that registered the worker may
* need to know that we've processed the terminate request, so be sure
* to signal it.
*/
if (slot->terminate)
{
@ -435,8 +435,8 @@ ResetBackgroundWorkerCrashTimes(void)
rw = slist_container(RegisteredBgWorker, rw_lnode, iter.cur);
/*
* For workers that should not be restarted, we don't want to lose
* the information that they have crashed; otherwise, they would be
* For workers that should not be restarted, we don't want to lose the
* information that they have crashed; otherwise, they would be
* restarted, which is wrong.
*/
if (rw->rw_worker.bgw_restart_time != BGW_NEVER_RESTART)
@ -679,7 +679,8 @@ StartBackgroundWorker(void)
/*
* Early initialization. Some of this could be useful even for
* background workers that aren't using shared memory, but they can
* call the individual startup routines for those subsystems if needed.
* call the individual startup routines for those subsystems if
* needed.
*/
BaseInit();


@ -2930,9 +2930,9 @@ CleanupBackgroundWorker(int pid,
}
/*
* We must release the postmaster child slot whether this worker
* is connected to shared memory or not, but we only treat it as
* a crash if it is in fact connected.
* We must release the postmaster child slot whether this worker is
* connected to shared memory or not, but we only treat it as a crash
* if it is in fact connected.
*/
if (!ReleasePostmasterChildSlot(rw->rw_child_slot) &&
(rw->rw_worker.bgw_flags & BGWORKER_SHMEM_ACCESS) != 0)


@ -130,11 +130,12 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
&labelfile, tblspcdir, &tablespaces,
&tblspc_map_file,
opt->progress, opt->sendtblspcmapfile);
/*
* Once do_pg_start_backup has been called, ensure that any failure causes
* us to abort the backup so we don't "leak" a backup counter. For this reason,
* *all* functionality between do_pg_start_backup() and do_pg_stop_backup()
* should be inside the error cleanup block!
* us to abort the backup so we don't "leak" a backup counter. For this
* reason, *all* functionality between do_pg_start_backup() and
* do_pg_stop_backup() should be inside the error cleanup block!
*/
PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0);
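
The guarantee PG_ENSURE_ERROR_CLEANUP() provides can be modelled standalone with setjmp/longjmp. Everything below is a toy under that assumption: the invented counter stands in for the backup counter, and longjmp stands in for elog(ERROR):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf on_error;
static int backup_counter = 0;

static void
cleanup(void)
{
    backup_counter--;   /* release the counter on every exit path */
    printf("cleanup ran, counter=%d\n", backup_counter);
}

static void
do_backup(int fail)
{
    backup_counter++;           /* models do_pg_start_backup() */
    if (fail)
        longjmp(on_error, 1);   /* models an elog(ERROR) mid-backup */
}

int main(void)
{
    if (setjmp(on_error) == 0)
        do_backup(1);   /* the normal path would fall through... */
    cleanup();          /* ...and on either path cleanup still runs */
    return 0;
}
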
@ -145,8 +146,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
SendXlogRecPtrResult(startptr, starttli);
/*
* Calculate the relative path of temporary statistics directory in order
* to skip the files which are located in that directory later.
* Calculate the relative path of temporary statistics directory in
* order to skip the files which are located in that directory later.
*/
if (is_absolute_path(pgstat_stat_directory) &&
strncmp(pgstat_stat_directory, DataDir, datadirpathlen) == 0)
@ -900,8 +901,8 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces,
/*
* If there's a backup_label or tablespace_map file, it belongs to a
* backup started by the user with pg_start_backup(). It is *not*
* correct for this backup, our backup_label/tablespace_map is injected
* into the tar separately.
* correct for this backup, our backup_label/tablespace_map is
* injected into the tar separately.
*/
if (strcmp(de->d_name, BACKUP_LABEL_FILE) == 0)
continue;


@ -93,11 +93,11 @@ libpqrcv_connect(char *conninfo)
const char *vals[5];
/*
* We use the expand_dbname parameter to process the connection string
* (or URI), and pass some extra options. The deliberately undocumented
* parameter "replication=true" makes it a replication connection.
* The database name is ignored by the server in replication mode, but
* specify "replication" for .pgpass lookup.
* We use the expand_dbname parameter to process the connection string (or
* URI), and pass some extra options. The deliberately undocumented
* parameter "replication=true" makes it a replication connection. The
* database name is ignored by the server in replication mode, but specify
* "replication" for .pgpass lookup.
*/
keys[0] = "dbname";
vals[0] = conninfo;
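
A hedged sketch of the construction this comment describes, using libpq's public PQconnectdbParams(); the conninfo string and the extra options shown are illustrative only:

#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    /* expand_dbname = 1 below makes libpq parse vals[0] as a full
     * connection string (or URI) rather than a bare database name. */
    const char *keys[] = {"dbname", "replication", "fallback_application_name", NULL};
    const char *vals[] = {"host=primary port=5432", "true", "walreceiver", NULL};

    PGconn *conn = PQconnectdbParams(keys, vals, 1);

    if (PQstatus(conn) != CONNECTION_OK)
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
    PQfinish(conn);
    return 0;
}
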


@ -271,6 +271,7 @@ replorigin_create(char *roname)
bool nulls[Natts_pg_replication_origin];
Datum values[Natts_pg_replication_origin];
bool collides;
CHECK_FOR_INTERRUPTS();
ScanKeyInit(&key,
@ -646,6 +647,7 @@ StartupReplicationOrigin(void)
/* don't want to overwrite already existing state */
#ifdef USE_ASSERT_CHECKING
static bool already_started = false;
Assert(!already_started);
already_started = true;
#endif
@ -660,8 +662,8 @@ StartupReplicationOrigin(void)
fd = OpenTransientFile((char *) path, O_RDONLY | PG_BINARY, 0);
/*
* might have had max_replication_slots == 0 last run, or we just brought up a
* standby.
* might have had max_replication_slots == 0 last run, or we just brought
* up a standby.
*/
if (fd < 0 && errno == ENOENT)
return;
@ -899,6 +901,7 @@ replorigin_advance(RepOriginId node,
if (wal_log)
{
xl_replorigin_set xlrec;
xlrec.remote_lsn = remote_commit;
xlrec.node_id = node;
xlrec.force = go_backward;
@ -911,8 +914,8 @@ replorigin_advance(RepOriginId node,
/*
* Due to - harmless - race conditions during a checkpoint we could see
* values here that are older than the ones we already have in
* memory. Don't overwrite those.
* values here that are older than the ones we already have in memory.
* Don't overwrite those.
*/
if (go_backward || replication_state->remote_lsn < remote_commit)
replication_state->remote_lsn = remote_commit;
@ -973,7 +976,6 @@ replorigin_get_progress(RepOriginId node, bool flush)
static void
ReplicationOriginExitCleanup(int code, Datum arg)
{
LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
if (session_replication_state != NULL &&


@ -1337,6 +1337,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
switch (change->action)
{
case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
/*
* Confirmation for speculative insertion arrived. Simply
* use as a normal record. It'll be cleaned up at the end
@ -1380,10 +1381,10 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
goto change_done;
/*
* For now ignore sequence changes entirely. Most of
* the time they don't log changes using records we
* understand, so it doesn't make sense to handle the
* few cases we do.
* For now ignore sequence changes entirely. Most of the
* time they don't log changes using records we
* understand, so it doesn't make sense to handle the few
* cases we do.
*/
if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
goto change_done;
@ -1395,9 +1396,9 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
rb->apply_change(rb, txn, relation, change);
/*
* Only clear reassembled toast chunks if we're
* sure they're not required anymore. The creator
* of the tuple tells us.
* Only clear reassembled toast chunks if we're sure
* they're not required anymore. The creator of the
* tuple tells us.
*/
if (change->data.tp.clear_toast_afterwards)
ReorderBufferToastReset(rb, txn);
@ -1419,6 +1420,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
}
change_done:
/*
* Either speculative insertion was confirmed, or it was
* unsuccessful and the record isn't needed anymore.
@ -1437,6 +1439,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
break;
case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
/*
* Speculative insertions are dealt with by delaying the
* processing of the insert until the confirmation record
@ -1704,9 +1707,9 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
txn->final_lsn = lsn;
/*
* Process cache invalidation messages if there are any. Even if we're
* not interested in the transaction's contents, it could have manipulated
* the catalog and we need to update the caches according to that.
* Process cache invalidation messages if there are any. Even if we're not
* interested in the transaction's contents, it could have manipulated the
* catalog and we need to update the caches according to that.
*/
if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
{


@ -153,9 +153,8 @@ struct SnapBuild
TransactionId xmax;
/*
* Don't replay commits from an LSN < this LSN. This can be set
* externally but it will also be advanced (never retreat) from within
* snapbuild.c.
* Don't replay commits from an LSN < this LSN. This can be set externally
* but it will also be advanced (never retreat) from within snapbuild.c.
*/
XLogRecPtr start_decoding_at;


@ -99,9 +99,9 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
CheckLogicalDecodingRequirements();
/*
* Acquire a logical decoding slot, this will check for conflicting
* names. Initially create it as ephemeral - that allows us to nicely
* handle errors during initialization because it'll get dropped if this
* Acquire a logical decoding slot, this will check for conflicting names.
* Initially create it as ephemeral - that allows us to nicely handle
* errors during initialization because it'll get dropped if this
* transaction fails. We'll make it persistent at the end.
*/
ReplicationSlotCreate(NameStr(*name), true, RS_EPHEMERAL);


@ -781,6 +781,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
else
{
CheckLogicalDecodingRequirements();
/*
* Initially create the slot as ephemeral - that allows us to nicely
* handle errors during initialization because it'll get dropped if
@ -1266,9 +1267,9 @@ exec_replication_command(const char *cmd_string)
MemoryContext old_context;
/*
* Log replication command if log_replication_commands is enabled.
* Even when it's disabled, log the command with DEBUG1 level for
* backward compatibility.
* Log replication command if log_replication_commands is enabled. Even
* when it's disabled, log the command with DEBUG1 level for backward
* compatibility.
*/
ereport(log_replication_commands ? LOG : DEBUG1,
(errmsg("received replication command: %s", cmd_string)));


@ -1750,8 +1750,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
/*
* Apply any row level security policies. We do this last because it
* requires special recursion detection if the new quals have sublink
* subqueries, and if we did it in the loop above query_tree_walker
* would then recurse into those quals a second time.
* subqueries, and if we did it in the loop above query_tree_walker would
* then recurse into those quals a second time.
*/
rt_index = 0;
foreach(lc, parsetree->rtable)
@ -2797,8 +2797,8 @@ rewriteTargetView(Query *parsetree, Relation view)
* happens in ordinary SELECT usage of a view: all referenced columns must
* have read permission, even if optimization finds that some of them can
* be discarded during query transformation. The flattening we're doing
* here is an optional optimization, too. (If you are unpersuaded and want
* to change this, note that applying adjust_view_column_set to
* here is an optional optimization, too. (If you are unpersuaded and
* want to change this, note that applying adjust_view_column_set to
* view_rte->selectedCols is clearly *not* the right answer, since that
* neglects base-rel columns used in the view's WHERE quals.)
*


@ -124,9 +124,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
user_id = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/*
* If this is not a normal relation, or we have been told
* to explicitly skip RLS (perhaps because this is an FK check)
* then just return immediately.
* If this is not a normal relation, or we have been told to explicitly
* skip RLS (perhaps because this is an FK check) then just return
* immediately.
*/
if (rte->relid < FirstNormalObjectId
|| rte->relkind != RELKIND_RELATION
@ -148,9 +148,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
if (rls_status == RLS_NONE_ENV)
{
/*
* Indicate that this query may involve RLS and must therefore
* be replanned if the environment changes (GUCs, role), but we
* are not adding anything here.
* Indicate that this query may involve RLS and must therefore be
* replanned if the environment changes (GUCs, role), but we are not
* adding anything here.
*/
*hasRowSecurity = true;
@ -166,12 +166,11 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
/*
* Check if this is only the default-deny policy.
*
* Normally, if the table has row security enabled but there are
* no policies, we use a default-deny policy and do not allow anything.
* However, when an extension uses the hook to add its own
* policies, we don't want to include the default-deny policy or
* there won't be any way for a user to use an extension exclusively
* for the policies to be used.
* Normally, if the table has row security enabled but there are no
* policies, we use a default-deny policy and do not allow anything.
* However, when an extension uses the hook to add its own policies, we
* don't want to include the default-deny policy or there won't be any
* way for a user to use an extension exclusively for the policies to
* be used.
*/
if (((RowSecurityPolicy *) linitial(rowsec_policies))->policy_id
== InvalidOid)
@ -187,8 +186,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
* extensions can add either permissive or restrictive policies.
*
* Note that, as with the internal policies, if multiple policies are
* returned then they will be combined into a single expression with
* all of them OR'd (for permissive) or AND'd (for restrictive) together.
* returned then they will be combined into a single expression with all
* of them OR'd (for permissive) or AND'd (for restrictive) together.
*
* If only a USING policy is returned by the extension then it will be
* used for WITH CHECK as well, similar to how internal policies are
@ -226,9 +225,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
}
/*
* If the only built-in policy is the default-deny one, and hook
* policies exist, then use the hook policies only and do not apply
* the default-deny policy. Otherwise, we will apply both sets below.
* If the only built-in policy is the default-deny one, and hook policies
* exist, then use the hook policies only and do not apply the
* default-deny policy. Otherwise, we will apply both sets below.
*/
if (defaultDeny &&
(hook_policies_restrictive != NIL || hook_policies_permissive != NIL))
@ -238,10 +237,10 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
}
/*
* For INSERT or UPDATE, we need to add the WITH CHECK quals to
* Query's withCheckOptions to verify that any new records pass the
* WITH CHECK policy (this will be a copy of the USING policy, if no
* explicit WITH CHECK policy exists).
* For INSERT or UPDATE, we need to add the WITH CHECK quals to Query's
* withCheckOptions to verify that any new records pass the WITH CHECK
* policy (this will be a copy of the USING policy, if no explicit WITH
* CHECK policy exists).
*/
if (commandType == CMD_INSERT || commandType == CMD_UPDATE)
{
@ -269,8 +268,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
}
/*
* Handle built-in policies, if there are no permissive
* policies from the hook.
* Handle built-in policies, if there are no permissive policies from
* the hook.
*/
if (rowsec_with_check_expr && !hook_with_check_expr_permissive)
{
@ -409,8 +408,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
heap_close(rel, NoLock);
/*
* Mark this query as having row security, so plancache can invalidate
* it when necessary (eg: role changes)
* Mark this query as having row security, so plancache can invalidate it
* when necessary (eg: role changes)
*/
*hasRowSecurity = true;
@ -432,9 +431,10 @@ pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
/*
* Row security is enabled for the relation and the row security GUC is
* either 'on' or 'force' here, so find the policies to apply to the table.
* There must always be at least one policy defined (may be the simple
* 'default-deny' policy, if none are explicitly defined on the table).
* either 'on' or 'force' here, so find the policies to apply to the
* table. There must always be at least one policy defined (may be the
* simple 'default-deny' policy, if none are explicitly defined on the
* table).
*/
foreach(item, relation->rd_rsdesc->policies)
{
@ -528,9 +528,9 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
List *with_check_quals = NIL;
/*
* Extract the USING and WITH CHECK quals from each of the policies
* and add them to our lists. We only want WITH CHECK quals if this
* RTE is the query's result relation.
* Extract the USING and WITH CHECK quals from each of the policies and
* add them to our lists. We only want WITH CHECK quals if this RTE is
* the query's result relation.
*/
foreach(item, policies)
{
@ -545,8 +545,8 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
with_check_quals);
/*
* For each policy, if there is only a USING clause then copy/use it for
* the WITH CHECK policy also, if this RTE is the query's result
* For each policy, if there is only a USING clause then copy/use it
* for the WITH CHECK policy also, if this RTE is the query's result
* relation.
*/
if (policy->qual != NULL && policy->with_check_qual == NULL &&
@ -568,16 +568,16 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
BoolGetDatum(false), false, true), quals);
/*
* Row security quals always have the target table as varno 1, as no
* joins are permitted in row security expressions. We must walk the
* expression, updating any references to varno 1 to the varno
* the table has in the outer query.
* Row security quals always have the target table as varno 1, as no joins
* are permitted in row security expressions. We must walk the expression,
* updating any references to varno 1 to the varno the table has in the
* outer query.
*
* We rewrite the expression in-place.
*
* We must have some quals at this point; the default-deny policy, if
* nothing else. Note that we might not have any WITH CHECK quals --
* that's fine, as this might not be the resultRelation.
* nothing else. Note that we might not have any WITH CHECK quals -- that's
* fine, as this might not be the resultRelation.
*/
Assert(quals != NIL);
@ -596,8 +596,8 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
*qual_eval = (Expr *) linitial(quals);
/*
* Similarly, if more than one WITH CHECK qual is returned, then
* they need to be combined together.
* Similarly, if more than one WITH CHECK qual is returned, then they
* need to be combined together.
*
* with_check_quals is allowed to be NIL here since this might not be the
* resultRelation (see above).


@ -346,6 +346,7 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
{
ref->buffer = InvalidBuffer;
/*
* Mark the just used entry as reserved - in many scenarios that
* allows us to avoid ever having to search the array/hash for free
@ -357,6 +358,7 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
{
bool found;
Buffer buffer = ref->buffer;
hash_search(PrivateRefCountHash,
(void *) &buffer,
HASH_REMOVE,
@ -669,8 +671,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
found);
/*
* In RBM_ZERO_AND_LOCK mode the caller expects the page to
* be locked on return.
* In RBM_ZERO_AND_LOCK mode the caller expects the page to be
* locked on return.
*/
if (!isLocalBuf)
{
@ -809,9 +811,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* page before the caller has had a chance to initialize it.
*
* Since no-one else can be looking at the page contents yet, there is no
* difference between an exclusive lock and a cleanup-strength lock.
* (Note that we cannot use LockBuffer() or LockBufferForCleanup() here,
* because they assert that the buffer is already valid.)
* difference between an exclusive lock and a cleanup-strength lock. (Note
* that we cannot use LockBuffer() or LockBufferForCleanup() here, because
* they assert that the buffer is already valid.)
*/
if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
!isLocalBuf)
@ -939,8 +941,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
for (;;)
{
/*
* Ensure, while the spinlock's not yet held, that there's a free refcount
* entry.
* Ensure, while the spinlock's not yet held, that there's a free
* refcount entry.
*/
ReservePrivateRefCountEntry();
@ -2169,6 +2171,7 @@ CheckForBufferLeaks(void)
if (PrivateRefCountOverflowed)
{
HASH_SEQ_STATUS hstat;
hash_seq_init(&hstat, PrivateRefCountHash);
while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
{
@ -2974,6 +2977,7 @@ IncrBufferRefCount(Buffer buffer)
else
{
PrivateRefCountEntry *ref;
ref = GetPrivateRefCountEntry(buffer, true);
Assert(ref != NULL);
ref->refcount++;


@ -381,6 +381,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
if (complete_passes)
{
*complete_passes = StrategyControl->completePasses;
/*
* Additionally add the number of wraparounds that happened before
* completePasses could be incremented. C.f. ClockSweepTick().


@ -341,11 +341,11 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
FreeDir(dbspace_dir);
/*
* copy_file() above has already called pg_flush_data() on the
* files it created. Now we need to fsync those files, because
* a checkpoint won't do it for us while we're in recovery. We
* do this in a separate pass to allow the kernel to perform
* all the flushes (especially the metadata ones) at once.
* copy_file() above has already called pg_flush_data() on the files
* it created. Now we need to fsync those files, because a checkpoint
* won't do it for us while we're in recovery. We do this in a
* separate pass to allow the kernel to perform all the flushes
* (especially the metadata ones) at once.
*/
dbspace_dir = AllocateDir(dbspacedirname);
if (dbspace_dir == NULL)
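
The separate-pass structure reads naturally as two loops over the same directory. A standalone sketch of the second pass (error handling trimmed, the helper name invented):

#include <fcntl.h>
#include <unistd.h>

/* Pass 2: fsync files a previous pass created, letting the kernel batch
 * the data and metadata flushes instead of alternating copy and fsync. */
static void
fsync_pass(const char *paths[], int npaths)
{
    for (int i = 0; i < npaths; i++)
    {
        int fd = open(paths[i], O_RDONLY);

        if (fd >= 0)
        {
            fsync(fd);
            close(fd);
        }
    }
}
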


@ -1707,10 +1707,10 @@ ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc)
pgxact = &allPgXact[proc->pgprocno];
/*
* Be certain that the referenced PGPROC has an advertised xmin which
* is no later than the one we're installing, so that the system-wide
* xmin can't go backwards. Also, make sure it's running in the same
* database, so that the per-database xmin cannot go backwards.
* Be certain that the referenced PGPROC has an advertised xmin which is
* no later than the one we're installing, so that the system-wide xmin
* can't go backwards. Also, make sure it's running in the same database,
* so that the per-database xmin cannot go backwards.
*/
xid = pgxact->xmin; /* fetch just once */
if (proc->databaseId == MyDatabaseId &&


@ -399,12 +399,12 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
/*
* We want to avoid copying the data if at all possible, but every
* chunk of bytes we write into the queue has to be MAXALIGN'd,
* except the last. Thus, if a chunk other than the last one ends
* on a non-MAXALIGN'd boundary, we have to combine the tail end of
* its data with data from one or more following chunks until we
* either reach the last chunk or accumulate a number of bytes which
* is MAXALIGN'd.
* chunk of bytes we write into the queue has to be MAXALIGN'd, except
* the last. Thus, if a chunk other than the last one ends on a
* non-MAXALIGN'd boundary, we have to combine the tail end of its
* data with data from one or more following chunks until we either
* reach the last chunk or accumulate a number of bytes which is
* MAXALIGN'd.
*/
if (which_iov + 1 < iovcnt &&
offset + MAXIMUM_ALIGNOF > iov[which_iov].len)
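
The alignment rule is easy to check with the MAXALIGN arithmetic itself. A standalone sketch that combines chunk tails until the pending byte count is MAXALIGN'd (8-byte alignment assumed, iovec lengths invented):

#include <stdio.h>

#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(x) (((x) + MAXIMUM_ALIGNOF - 1) & ~(size_t) (MAXIMUM_ALIGNOF - 1))

int main(void)
{
    size_t iov_lens[] = {5, 11, 3}; /* hypothetical chunk lengths */
    int    niov = 3;
    size_t pending = 0;

    for (int i = 0; i < niov; i++)
    {
        pending += iov_lens[i];
        /* Flush once the accumulated bytes are MAXALIGN'd, or at the
         * last chunk, which alone is allowed to end unaligned. */
        if (pending == MAXALIGN(pending) || i == niov - 1)
        {
            printf("write %zu bytes\n", pending);   /* 16, then 3 */
            pending = 0;
        }
    }
    return 0;
}
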
