Fix misc typos.

Oskari Saarenmaa. Backpatch to stable branches where applicable.
Heikki Linnakangas 2015-09-05 11:35:49 +03:00
parent c39f5674df
commit c80b5f66c6
17 changed files with 25 additions and 25 deletions


@@ -369,7 +369,7 @@ gbt_ts_penalty(PG_FUNCTION_ARGS)
 				newdbl[2];
 
 	/*
-	 * We are allways using "double" timestamps here. Precision should be good
+	 * We are always using "double" timestamps here. Precision should be good
 	 * enough.
 	 */
 	orgdbl[0] = ((double) origentry->lower);


@@ -52,7 +52,7 @@ gbt_var_decompress(PG_FUNCTION_ARGS)
 	PG_RETURN_POINTER(entry);
 }
 
-/* Returns a better readable representaion of variable key ( sets pointer ) */
+/* Returns a better readable representation of variable key ( sets pointer ) */
 GBT_VARKEY_R
 gbt_var_key_readable(const GBT_VARKEY *k)
 {


@@ -814,7 +814,7 @@ cube_inter(PG_FUNCTION_ARGS)
 					Max(LL_COORD(b, i), UR_COORD(b, i))
 			);
 	}
-	/* continue on the higher dimemsions only present in 'a' */
+	/* continue on the higher dimensions only present in 'a' */
 	for (; i < DIM(a); i++)
 	{
 		result->x[i] = Max(0,


@@ -138,7 +138,7 @@ ALTER ROLE { <replaceable class="PARAMETER">role_specification</replaceable> | A
     <term>CURRENT_USER</term>
     <listitem>
      <para>
-      Alter the current user instead of an explicitely identified role.
+      Alter the current user instead of an explicitly identified role.
      </para>
     </listitem>
    </varlistentry>
@@ -147,7 +147,7 @@ ALTER ROLE { <replaceable class="PARAMETER">role_specification</replaceable> | A
     <term>SESSION_USER</term>
     <listitem>
      <para>
-      Alter the current session user instead of an explicitely identified
+      Alter the current session user instead of an explicitly identified
       role.
      </para>
     </listitem>


@@ -460,7 +460,7 @@ FIXME: Add Andres
 <!--
 2014-07-01 [9f03ca9] Robert..: Avoid copying index tuples when building an ind..
 -->
-      Speed up CREATE INDEX by avoiding unneccessary memory copies (Robert Haas)
+      Speed up CREATE INDEX by avoiding unnecessary memory copies (Robert Haas)
      </para>
     </listitem>
 
@@ -1111,7 +1111,7 @@ FIXME: Correct description
 -->
     <para>
      Allow changing of the <acronym>WAL</acronym>
-     logging status of a table ater creation with <link
+     logging status of a table after creation with <link
      linkend="SQL-ALTERTABLE"><command>ALTER TABLE .. SET LOGGED /
      UNLOGGED</></> (Fabr&iacute;zio de Royes Mello)
     </para>


@@ -251,7 +251,7 @@ ereport(ERROR,
    </listitem>
    <listitem>
     <para>
-     <function>errdetail_log_plural(const char *fmt_singuar, const char
+     <function>errdetail_log_plural(const char *fmt_singular, const char
      *fmt_plural, unsigned long n, ...)</function> is like
      <function>errdetail_log</>, but with support for various plural forms of
      the message.


@@ -127,7 +127,7 @@ brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
  * it's not long enough.
  *
  * The returned buffer is also recorded in the revmap struct; finishing that
- * releases the buffer, therefore the caller needn't do it explicitely.
+ * releases the buffer, therefore the caller needn't do it explicitly.
  */
 Buffer
 brinLockRevmapPageForUpdate(BrinRevmap *revmap, BlockNumber heapBlk)
@@ -314,7 +314,7 @@ revmap_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
  * Obtain and return a buffer containing the revmap page for the given heap
  * page. The revmap must have been previously extended to cover that page.
  * The returned buffer is also recorded in the revmap struct; finishing that
- * releases the buffer, therefore the caller needn't do it explicitely.
+ * releases the buffer, therefore the caller needn't do it explicitly.
  */
 static Buffer
 revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)


@@ -805,7 +805,7 @@ heap_modify_tuple(HeapTuple tuple,
  * repl information, as appropriate.
  *
  * NOTE: it's debatable whether to use heap_deform_tuple() here or just
- * heap_getattr() only the non-replaced colums. The latter could win if
+ * heap_getattr() only the non-replaced columns. The latter could win if
  * there are many replaced columns and few non-replaced ones. However,
  * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
  * O(N^2) if there are many non-replaced columns, so it seems better to
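
The O(N) vs. O(N^2) trade-off this comment describes follows from how
variable-width columns are stored: locating attribute i means skipping over
the i-1 attributes before it. A minimal toy sketch of that effect
(hypothetical length-prefixed fields in a byte array, not PostgreSQL's
actual heap_getattr/heap_deform_tuple):

#include <stdio.h>

/* Toy "tuple": each field is stored as one length byte plus its payload. */

/* O(i): must walk past all earlier variable-width fields to reach field i. */
static const unsigned char *
toy_getattr(const unsigned char *tup, int i)
{
	for (int k = 0; k < i; k++)
		tup += 1 + tup[0];		/* skip length byte plus payload */
	return tup + 1;				/* points at field i's payload */
}

/* O(N): a single pass records every field's offset. */
static void
toy_deform(const unsigned char *tup, int natts, const unsigned char **values)
{
	for (int i = 0; i < natts; i++)
	{
		values[i] = tup + 1;
		tup += 1 + tup[0];
	}
}

int
main(void)
{
	/* three fields: "ab", "c", "defg" */
	const unsigned char tup[] = {2, 'a', 'b', 1, 'c', 4, 'd', 'e', 'f', 'g'};
	const unsigned char *values[3];

	toy_deform(tup, 3, values);				/* one O(N) scan */
	printf("%c\n", *toy_getattr(tup, 2));	/* 'd', found via an O(i) walk */
	printf("%c\n", *values[2]);				/* 'd' again, no extra walk */
	return 0;
}

Fetching all N attributes through repeated toy_getattr calls costs
1 + 2 + ... + N = O(N^2) skips, while toy_deform does the same work in a
single O(N) pass; that is the reasoning the comment applies here.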


@@ -888,8 +888,8 @@ ginInsertCleanup(GinState *ginstate,
 							 * locking */
 
 			/*
-			 * remove readed pages from pending list, at this point all
-			 * content of readed pages is in regular structure
+			 * remove read pages from pending list, at this point all
+			 * content of read pages is in regular structure
 			 */
 			if (shiftList(index, metabuffer, blkno, stats))
 			{


@@ -588,7 +588,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
 	 * We first consider splits where b is the lower bound of an entry.
 	 * We iterate through all entries, and for each b, calculate the
 	 * smallest possible a. Then we consider splits where a is the
-	 * uppper bound of an entry, and for each a, calculate the greatest
+	 * upper bound of an entry, and for each a, calculate the greatest
 	 * possible b.
 	 *
 	 * In the above example, the first loop would consider splits:
@@ -638,7 +638,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
 		}
 
 		/*
-		 * Iterate over upper bound of left group finding greates possible
+		 * Iterate over upper bound of left group finding greatest possible
 		 * lower bound of right group.
 		 */
 		i1 = nentries - 1;


@@ -5473,7 +5473,7 @@ l4:
  *
  * The initial tuple is assumed to be already locked.
  *
- * This function doesn't check visibility, it just inconditionally marks the
+ * This function doesn't check visibility, it just unconditionally marks the
  * tuple(s) as locked. If any tuple in the updated chain is being deleted
  * concurrently (or updated with the key being modified), sleep until the
  * transaction doing it is finished.
@@ -6187,7 +6187,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 			/*
 			 * NB -- some of these transformations are only valid because we
 			 * know the return Xid is a tuple updater (i.e. not merely a
-			 * locker.) Also note that the only reason we don't explicitely
+			 * locker.) Also note that the only reason we don't explicitly
 			 * worry about HEAP_KEYS_UPDATED is because it lives in
 			 * t_infomask2 rather than t_infomask.
 			 */


@@ -763,9 +763,9 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
  *
  * Crash-Safety: This module diverts from the usual patterns of doing WAL
  * since it cannot rely on checkpoint flushing out all buffers and thus
- * waiting for exlusive locks on buffers. Usually the XLogInsert() covering
+ * waiting for exclusive locks on buffers. Usually the XLogInsert() covering
  * buffer modifications is performed while the buffer(s) that are being
- * modified are exlusively locked guaranteeing that both the WAL record and
+ * modified are exclusively locked guaranteeing that both the WAL record and
  * the modified heap are on either side of the checkpoint. But since the
  * mapping files we log aren't in shared_buffers that interlock doesn't work.
  *


@@ -83,7 +83,7 @@ int synchronous_commit = SYNCHRONOUS_COMMIT_ON;
  * When running as a parallel worker, we place only a single
  * TransactionStateData on the parallel worker's state stack, and the XID
  * reflected there will be that of the *innermost* currently-active
- * subtransaction in the backend that initiated paralllelism. However,
+ * subtransaction in the backend that initiated parallelism. However,
  * GetTopTransactionId() and TransactionIdIsCurrentTransactionId()
  * need to return the same answers in the parallel worker as they would have
  * in the user backend, so we need some additional bookkeeping.


@@ -1039,7 +1039,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
 
 	/*
 	 * The TID qual expressions will be computed once, any other baserestrict
-	 * quals once per retrived tuple.
+	 * quals once per retrieved tuple.
 	 */
 	cost_qual_eval(&tid_qual_cost, tidquals, root);
 


@@ -1049,7 +1049,7 @@ replorigin_session_setup(RepOriginId node)
 	{
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_IN_USE),
-				 errmsg("replication identiefer %d is already active for pid %d",
+				 errmsg("replication identifier %d is already active for pid %d",
 						curstate->roident, curstate->acquired_by)));
 	}
 


@@ -855,7 +855,7 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
 
 	/*
 	 * Would this oper be found (given the right args) by regoperatorin?
-	 * If not, or if caller explicitely requests it, we need to qualify
+	 * If not, or if caller explicitly requests it, we need to qualify
 	 * it.
 	 */
 	if (force_qualify || !OperatorIsVisible(operator_oid))


@@ -54,13 +54,13 @@ typedef struct LWLock
 	slock_t		mutex;			/* Protects LWLock and queue of PGPROCs */
 	uint16		tranche;		/* tranche ID */
 
-	pg_atomic_uint32 state;		/* state of exlusive/nonexclusive lockers */
+	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
 #ifdef LOCK_DEBUG
 	pg_atomic_uint32 nwaiters;	/* number of waiters */
 #endif
 	dlist_head	waiters;		/* list of waiting PGPROCs */
 #ifdef LOCK_DEBUG
-	struct PGPROC *owner;		/* last exlusive owner of the lock */
+	struct PGPROC *owner;		/* last exclusive owner of the lock */
 #endif
 } LWLock;
 
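
The state field packs exclusive and shared locker information into a single
atomic word, so the lock fast path needs only one atomic operation. As a
rough illustration of that general technique only (a toy reader-writer lock
sketch, not the real LWLockAcquire logic, which also maintains the waiter
queue under mutex):

#include <stdatomic.h>
#include <stdbool.h>

#define EXCLUSIVE_BIT (1u << 31)	/* top bit: an exclusive holder exists;
									 * low bits: count of shared holders */

/* Take the lock in shared mode unless an exclusive holder exists. */
static bool
try_lock_shared(atomic_uint *state)
{
	unsigned	old = atomic_load(state);

	while (!(old & EXCLUSIVE_BIT))
	{
		if (atomic_compare_exchange_weak(state, &old, old + 1))
			return true;		/* bumped the shared-holder count */
	}
	return false;				/* exclusive bit was (or became) set */
}

/* Take the lock exclusively; succeeds only when nobody holds it at all. */
static bool
try_lock_exclusive(atomic_uint *state)
{
	unsigned	expected = 0;

	return atomic_compare_exchange_strong(state, &expected, EXCLUSIVE_BIT);
}

int
main(void)
{
	atomic_uint	state = 0;
	bool		s = try_lock_shared(&state);	/* true: shared count -> 1 */
	bool		x = try_lock_exclusive(&state);	/* false: a reader holds it */

	return (s && !x) ? 0 : 1;
}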