Spelling fixes in code comments

From: Josh Soref <jsoref@gmail.com>
Committed by: Peter Eisentraut, 2017-03-14 11:38:30 -04:00
parent 5ed6fff6b7
commit f97a028d8e
33 changed files with 42 additions and 42 deletions

View File

@@ -416,7 +416,7 @@ des_setkey(const char *key)
&& rawkey1 == old_rawkey1)
{
/*
- * Already setup for this key. This optimisation fails on a zero key
+ * Already setup for this key. This optimization fails on a zero key
* (which is weak and has bad parity anyway) in order to simplify the
* starting conditions.
*/

View File

@@ -51,7 +51,7 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
* and stop_date eq INFINITY [ and update_user eq current user ]
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
- * ELSE - skip updation of tuple.
+ * ELSE - skip updating of tuple.
* 2. IF a delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]

View File

@@ -1612,7 +1612,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* It's also possible to move I/O out of the lock, but on every error we
* should check whether somebody committed our transaction in different
- * backend. Let's leave this optimisation for future, if somebody will
+ * backend. Let's leave this optimization for future, if somebody will
* spot that this place cause bottleneck.
*
* Note that it isn't possible for there to be a GXACT with a

View File

@@ -2406,7 +2406,7 @@ CopyFrom(CopyState cstate)
* earlier scan or command. This ensures that if this subtransaction
* aborts then the frozen rows won't be visible after xact cleanup. Note
* that the stronger test of exactly which subtransaction created it is
- * crucial for correctness of this optimisation.
+ * crucial for correctness of this optimization.
*/
if (cstate->freeze)
{
@@ -2973,7 +2973,7 @@ BeginCopyFrom(ParseState *pstate,
* the special case of when the default expression is the
* nextval() of a sequence which in this specific case is
* known to be safe for use with the multi-insert
- * optimisation. Hence we use this special case function
+ * optimization. Hence we use this special case function
* checker rather than the standard check for
* contain_volatile_functions().
*/

View File

@@ -3182,7 +3182,7 @@ AlterTableGetLockLevel(List *cmds)
break;
/*
- * Changing foreign table options may affect optimisation.
+ * Changing foreign table options may affect optimization.
*/
case AT_GenericOptions:
case AT_AlterColumnGenericOptions:

View File

@@ -724,7 +724,7 @@ GetExistingLocalJoinPath(RelOptInfo *joinrel)
Path *path = (Path *) lfirst(lc);
JoinPath *joinpath = NULL;
- /* Skip parameterised paths. */
+ /* Skip parameterized paths. */
if (path->param_info != NULL)
continue;

View File

@@ -4582,7 +4582,7 @@ fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
}
}
- /* Ooops... */
+ /* Oops... */
elog(ERROR, "index key does not match expected index column");
return NULL; /* keep compiler quiet */
}

View File

@@ -802,7 +802,7 @@ merge_collation_state(Oid collation,
else if (collation != DEFAULT_COLLATION_OID)
{
/*
- * Ooops, we have a conflict. We cannot throw error
+ * Oops, we have a conflict. We cannot throw error
* here, since the conflict could be resolved by a
* later sibling CollateExpr, or the parent might not
* care about collation anyway. Return enough info to
@@ -821,7 +821,7 @@ merge_collation_state(Oid collation,
if (collation != context->collation)
{
/*
- * Ooops, we have a conflict of explicit COLLATE clauses.
+ * Oops, we have a conflict of explicit COLLATE clauses.
* Here we choose to throw error immediately; that is what
* the SQL standard says to do, and there's no good reason
* to be less strict.

View File

@@ -210,7 +210,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
}
else
{
- /* Ooops */
+ /* Oops */
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
errmsg("inconsistent types deduced for parameter $%d",

View File

@@ -224,7 +224,7 @@ static const unsigned char pg_char_properties[128] = {
* pg_set_regex_collation: set collation for these functions to obey
*
* This is called when beginning compilation or execution of a regexp.
- * Since there's no need for re-entrancy of regexp operations, it's okay
+ * Since there's no need for reentrancy of regexp operations, it's okay
* to store the results in static variables.
*/
void

View File

@@ -280,7 +280,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Oops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -370,7 +370,7 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
}
}
- /* Ooops */
+ /* Oops */
LWLockRelease(ProcArrayLock);
elog(LOG, "failed to find proc %p in ProcArray", proc);

View File

@@ -1125,7 +1125,7 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
&found);
if (!proclock)
{
- /* Ooops, not enough shmem for the proclock */
+ /* Oops, not enough shmem for the proclock */
if (lock->nRequested == 0)
{
/*
@@ -4046,7 +4046,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
&found);
if (!proclock)
{
- /* Ooops, not enough shmem for the proclock */
+ /* Oops, not enough shmem for the proclock */
if (lock->nRequested == 0)
{
/*

View File

@@ -23,7 +23,7 @@
/*
- * Temporay we use TSLexeme.flags for inner use...
+ * Temporary we use TSLexeme.flags for inner use...
*/
#define DT_USEASIS 0x1000

View File

@@ -4312,7 +4312,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
return true;
}
- /* Ooops, clause has wrong structure (probably var op var) */
+ /* Oops, clause has wrong structure (probably var op var) */
ReleaseVariableStats(*vardata);
ReleaseVariableStats(rdata);

View File

@@ -2332,7 +2332,7 @@ get_typavgwidth(Oid typid, int32 typmod)
}
/*
- * Ooops, we have no idea ... wild guess time.
+ * Oops, we have no idea ... wild guess time.
*/
return 32;
}

View File

@@ -621,7 +621,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
return NIL;
}
- /* Ooops, the race case happened. Release useless locks. */
+ /* Oops, the race case happened. Release useless locks. */
AcquirePlannerLocks(plansource->query_list, false);
}
@@ -845,7 +845,7 @@ CheckCachedPlan(CachedPlanSource *plansource)
return true;
}
- /* Ooops, the race case happened. Release useless locks. */
+ /* Oops, the race case happened. Release useless locks. */
AcquireExecutorLocks(plan->stmt_list, false);
}

View File

@@ -318,7 +318,7 @@ errstart(int elevel, const char *filename, int lineno,
*/
if (ErrorContext == NULL)
{
- /* Ooops, hard crash time; very little we can do safely here */
+ /* Oops, hard crash time; very little we can do safely here */
write_stderr("error occurred at %s:%d before error message processing is available\n",
filename ? filename : "(unknown file)", lineno);
exit(2);
@@ -331,7 +331,7 @@ errstart(int elevel, const char *filename, int lineno,
if (recursion_depth++ > 0 && elevel >= ERROR)
{
/*
- * Ooops, error during error processing. Clear ErrorContext as
+ * Oops, error during error processing. Clear ErrorContext as
* discussed at top of file. We will not return to the original
* error's reporter or handler, so we don't need it.
*/
@@ -1302,7 +1302,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
/* Make sure that memory context initialization has finished */
if (ErrorContext == NULL)
{
- /* Ooops, hard crash time; very little we can do safely here */
+ /* Oops, hard crash time; very little we can do safely here */
write_stderr("error occurred at %s:%d before error message processing is available\n",
filename ? filename : "(unknown file)", lineno);
exit(2);

View File

@@ -877,7 +877,7 @@ struct fmgr_security_definer_cache
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
* the actual arguments, etc.) intact. This is not re-entrant, but then
- * the fcinfo itself can't be used re-entrantly anyway.
+ * the fcinfo itself can't be used reentrantly anyway.
*/
static Datum
fmgr_security_definer(PG_FUNCTION_ARGS)

View File

@@ -72,7 +72,7 @@
* when combined with HASH_DEBUG, these are displayed by hdestroy().
*
* Problems & fixes to ejp@ausmelb.oz. WARNING: relies on pre-processor
- * concatenation property, in probably unnecessary code 'optimisation'.
+ * concatenation property, in probably unnecessary code 'optimization'.
*
* Modified margo@postgres.berkeley.edu February 1990
* added multiple table interface

View File

@@ -114,7 +114,7 @@ If a show_hook is provided, it points to a function of the signature
This hook allows variable-specific computation of the value displayed
by SHOW (and other SQL features for showing GUC variable values).
The return value can point to a static buffer, since show functions are
- not used re-entrantly.
+ not used reentrantly.
Saving/Restoring GUC Variable Values

View File

@@ -92,7 +92,7 @@ struct ParallelSlot
/* These fields are valid if workerStatus == WRKR_WORKING: */
ParallelCompletionPtr callback; /* function to call on completion */
- void *callback_data; /* passthru data for it */
+ void *callback_data; /* passthrough data for it */
ArchiveHandle *AH; /* Archive data worker is using */

View File

@@ -173,7 +173,7 @@ fmtQualifiedId(int remoteVersion, const char *schema, const char *id)
* returned by PQserverVersion()) as a string. This exists mainly to
* encapsulate knowledge about two-part vs. three-part version numbers.
*
- * For re-entrancy, caller must supply the buffer the string is put in.
+ * For reentrancy, caller must supply the buffer the string is put in.
* Recommended size of the buffer is 32 bytes.
*
* Returns address of 'buf', as a notational convenience.

View File

@@ -127,7 +127,7 @@ extern JsonLexContext *makeJsonLexContextCstringLen(char *json,
/*
* Utility function to check if a string is a valid JSON number.
*
- * str agrument does not need to be nul-terminated.
+ * str argument does not need to be nul-terminated.
*/
extern bool IsValidJsonNumber(const char *str, int len);

View File

@@ -2136,7 +2136,7 @@ keep_going: /* We will come back to here until there is
} /* loop over addresses */
/*
- * Ooops, no more addresses. An appropriate error message is
+ * Oops, no more addresses. An appropriate error message is
* already set up, so just set the right status.
*/
goto error_return;

View File

@@ -2334,7 +2334,7 @@ PQputCopyEnd(PGconn *conn, const char *errormsg)
{
if (errormsg)
{
- /* Ooops, no way to do this in 2.0 */
+ /* Oops, no way to do this in 2.0 */
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("function requires at least protocol version 3.0\n"));
return -1;

View File

@@ -6205,10 +6205,10 @@ DPPP_(my_grok_number)(pTHX_ const char *pv, STRLEN len, UV *valuep)
/* UVs are at least 32 bits, so the first 9 decimal digits cannot
overflow. */
UV value = *s - '0';
- /* This construction seems to be more optimiser friendly.
+ /* This construction seems to be more optimizer friendly.
(without it gcc does the isDIGIT test and the *s - '0' separately)
With it gcc on arm is managing 6 instructions (6 cycles) per digit.
- In theory the optimiser could deduce how far to unroll the loop
+ In theory the optimizer could deduce how far to unroll the loop
before checking for overflow. */
if (++s < send) {
int digit = *s - '0';
@@ -6606,7 +6606,7 @@ DPPP_(my_grok_oct)(pTHX_ const char *start, STRLEN *len_p, I32 *flags, NV *resul
bool overflowed = FALSE;
for (; len-- && *s; s++) {
- /* gcc 2.95 optimiser not smart enough to figure that this subtraction
+ /* gcc 2.95 optimizer not smart enough to figure that this subtraction
out front allows slicker code. */
int digit = *s - '0';
if (digit >= 0 && digit <= 7) {

View File

@@ -5543,7 +5543,7 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate,
exec_check_rw_parameter(expr, expr->rwparam);
if (expr->expr_simple_expr == NULL)
{
- /* Ooops, release refcount and fail */
+ /* Oops, release refcount and fail */
ReleaseCachedPlan(cplan, true);
return false;
}

View File

@@ -122,7 +122,7 @@ PLy_procedure_get(Oid fn_oid, Oid fn_rel, bool is_trigger)
}
PG_CATCH();
{
- /* Do not leave an uninitialised entry in the cache */
+ /* Do not leave an uninitialized entry in the cache */
if (use_cache)
hash_search(PLy_procedure_cache, &key, HASH_REMOVE, NULL);
PG_RE_THROW();

View File

@@ -827,7 +827,7 @@ PLyObject_ToComposite(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inar
/*
* This will set up the dummy PLyTypeInfo's output conversion routines,
- * since we left is_rowtype as 2. A future optimisation could be caching
+ * since we left is_rowtype as 2. A future optimization could be caching
* that info instead of looking it up every time a tuple is returned from
* the function.
*/

View File

@@ -1,7 +1,7 @@
--
-- ERRORS
--
- -- bad in postquel, but ok in postsql
+ -- bad in postquel, but ok in PostgreSQL
select 1;
?column?
----------

View File

@@ -375,7 +375,7 @@ select *
ERROR: aggregate functions are not allowed in FROM clause of their own query level
LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ...
^
- -- min max optimisation should still work with GROUP BY ()
+ -- min max optimization should still work with GROUP BY ()
explain (costs off)
select min(unique1) from tenk1 GROUP BY ();
QUERY PLAN

View File

@@ -2,7 +2,7 @@
-- ERRORS
--
- -- bad in postquel, but ok in postsql
+ -- bad in postquel, but ok in PostgreSQL
select 1;

View File

@@ -140,7 +140,7 @@ select *
from (values (1),(2)) v(x),
lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s;
- -- min max optimisation should still work with GROUP BY ()
+ -- min max optimization should still work with GROUP BY ()
explain (costs off)
select min(unique1) from tenk1 GROUP BY ();