Fix typos in comments.

Backpatch to all supported versions, where applicable, to make backpatching
of future fixes go more smoothly.

Josh Soref

Discussion: https://www.postgresql.org/message-id/CACZqfqCf+5qRztLPgmmosr-B0Ye4srWzzw_mo4c_8_B_mtjmJQ@mail.gmail.com
This commit is contained in:
Heikki Linnakangas 2017-02-06 11:33:58 +02:00
parent 9863017b87
commit 181bdb90ba
137 changed files with 195 additions and 195 deletions

4
configure vendored
View File

@ -7088,7 +7088,7 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
# When Autoconf chooses install-sh as install program it tries to generate # When Autoconf chooses install-sh as install program it tries to generate
# a relative path to it in each makefile where it subsitutes it. This clashes # a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps. # with our Makefile.global concept. This workaround helps.
case $INSTALL in case $INSTALL in
*install-sh*) install_bin='';; *install-sh*) install_bin='';;
@ -7232,7 +7232,7 @@ fi
$as_echo "$MKDIR_P" >&6; } $as_echo "$MKDIR_P" >&6; }
# When Autoconf chooses install-sh as mkdir -p program it tries to generate # When Autoconf chooses install-sh as mkdir -p program it tries to generate
# a relative path to it in each makefile where it subsitutes it. This clashes # a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps. # with our Makefile.global concept. This workaround helps.
case $MKDIR_P in case $MKDIR_P in
*install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';; *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;

View File

@ -887,7 +887,7 @@ fi
AC_PROG_INSTALL AC_PROG_INSTALL
# When Autoconf chooses install-sh as install program it tries to generate # When Autoconf chooses install-sh as install program it tries to generate
# a relative path to it in each makefile where it subsitutes it. This clashes # a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps. # with our Makefile.global concept. This workaround helps.
case $INSTALL in case $INSTALL in
*install-sh*) install_bin='';; *install-sh*) install_bin='';;
@ -900,7 +900,7 @@ AC_PROG_LN_S
AC_PROG_AWK AC_PROG_AWK
AC_PROG_MKDIR_P AC_PROG_MKDIR_P
# When Autoconf chooses install-sh as mkdir -p program it tries to generate # When Autoconf chooses install-sh as mkdir -p program it tries to generate
# a relative path to it in each makefile where it subsitutes it. This clashes # a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps. # with our Makefile.global concept. This workaround helps.
case $MKDIR_P in case $MKDIR_P in
*install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';; *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;

View File

@ -51,7 +51,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
initBloomState(&state, index); initBloomState(&state, index);
/* /*
* Interate over the pages. We don't care about concurrently added pages, * Iterate over the pages. We don't care about concurrently added pages,
* they can't contain tuples to delete. * they can't contain tuples to delete.
*/ */
npages = RelationGetNumberOfBlocks(index); npages = RelationGetNumberOfBlocks(index);

View File

@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
5 5
(1 row) (1 row)
-- Test of cube_ll_coord function (retrieves LL coodinate values) -- Test of cube_ll_coord function (retrieves LL coordinate values)
-- --
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
cube_ll_coord cube_ll_coord
@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
0 0
(1 row) (1 row)
-- Test of cube_ur_coord function (retrieves UR coodinate values) -- Test of cube_ur_coord function (retrieves UR coordinate values)
-- --
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
cube_ur_coord cube_ur_coord

View File

@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
5 5
(1 row) (1 row)
-- Test of cube_ll_coord function (retrieves LL coodinate values) -- Test of cube_ll_coord function (retrieves LL coordinate values)
-- --
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
cube_ll_coord cube_ll_coord
@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
0 0
(1 row) (1 row)
-- Test of cube_ur_coord function (retrieves UR coodinate values) -- Test of cube_ur_coord function (retrieves UR coordinate values)
-- --
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
cube_ur_coord cube_ur_coord

View File

@ -256,7 +256,7 @@ SELECT cube_dim('(0,0,0)'::cube);
SELECT cube_dim('(42,42,42),(42,42,42)'::cube); SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube); SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
-- Test of cube_ll_coord function (retrieves LL coodinate values) -- Test of cube_ll_coord function (retrieves LL coordinate values)
-- --
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2); SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
@ -268,7 +268,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 1);
SELECT cube_ll_coord('(42,137)'::cube, 2); SELECT cube_ll_coord('(42,137)'::cube, 2);
SELECT cube_ll_coord('(42,137)'::cube, 3); SELECT cube_ll_coord('(42,137)'::cube, 3);
-- Test of cube_ur_coord function (retrieves UR coodinate values) -- Test of cube_ur_coord function (retrieves UR coordinate values)
-- --
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2); SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);

View File

@ -11,7 +11,7 @@ CREATE FUNCTION earth() RETURNS float8
LANGUAGE SQL IMMUTABLE PARALLEL SAFE LANGUAGE SQL IMMUTABLE PARALLEL SAFE
AS 'SELECT ''6378168''::float8'; AS 'SELECT ''6378168''::float8';
-- Astromers may want to change the earth function so that distances will be -- Astronomers may want to change the earth function so that distances will be
-- returned in degrees. To do this comment out the above definition and -- returned in degrees. To do this comment out the above definition and
-- uncomment the one below. Note that doing this will break the regression -- uncomment the one below. Note that doing this will break the regression
-- tests. -- tests.

View File

@ -23,7 +23,7 @@
* Product 9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103 * Product 9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103
* 103 / 10 = 10 remainder 3 * 103 / 10 = 10 remainder 3
* Check digit 10 - 3 = 7 * Check digit 10 - 3 = 7
* => 977-1144875-00-7 ?? <- suplemental number (number of the week, month, etc.) * => 977-1144875-00-7 ?? <- supplemental number (number of the week, month, etc.)
* ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...) * ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...)
* *
* The hyphenation is always in after the four digits of the ISSN code. * The hyphenation is always in after the four digits of the ISSN code.

View File

@ -160,7 +160,7 @@ dehyphenate(char *bufO, char *bufI)
* into bufO using the given hyphenation range TABLE. * into bufO using the given hyphenation range TABLE.
* Assumes the input string to be used is of only digits. * Assumes the input string to be used is of only digits.
* *
* Returns the number of characters acctually hyphenated. * Returns the number of characters actually hyphenated.
*/ */
static unsigned static unsigned
hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2]) hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2])
@ -748,7 +748,7 @@ string2ean(const char *str, bool errorOK, ean13 *result,
} }
else if (*aux2 == '!' && *(aux2 + 1) == '\0') else if (*aux2 == '!' && *(aux2 + 1) == '\0')
{ {
/* the invalid check digit sufix was found, set it */ /* the invalid check digit suffix was found, set it */
if (!magic) if (!magic)
valid = false; valid = false;
magic = true; magic = true;

View File

@ -1113,7 +1113,7 @@ SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
t t
(1 row) (1 row)
--exractors --extractors
SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null; SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
?column? ?column?
---------- ----------

View File

@ -197,7 +197,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
#define STACKDEPTH 32 #define STACKDEPTH 32
/* /*
* make polish notaion of query * make polish notation of query
*/ */
static int32 static int32
makepol(QPRS_STATE *state) makepol(QPRS_STATE *state)

View File

@ -209,7 +209,7 @@ SELECT 'a.b.c.d.e'::ltree ? '{A.b.c.d.e, a.*}';
SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}'; SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}';
SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}'; SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
--exractors --extractors
SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null; SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4'; SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4';
SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3'; SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3';

View File

@ -779,7 +779,7 @@ main(int argc, char **argv)
{ {
/* /*
* Once we have restored this file successfully we can remove some * Once we have restored this file successfully we can remove some
* prior WAL files. If this restore fails we musn't remove any * prior WAL files. If this restore fails we mustn't remove any
* file because some of them will be requested again immediately * file because some of them will be requested again immediately
* after the failed restore, or when we restart recovery. * after the failed restore, or when we restart recovery.
*/ */

View File

@ -139,7 +139,7 @@ typedef struct Counters
{ {
int64 calls; /* # of times executed */ int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */ double total_time; /* total execution time, in msec */
double min_time; /* minimim execution time in msec */ double min_time; /* minimum execution time in msec */
double max_time; /* maximum execution time in msec */ double max_time; /* maximum execution time in msec */
double mean_time; /* mean execution time in msec */ double mean_time; /* mean execution time in msec */
double sum_var_time; /* sum of variances in execution time in msec */ double sum_var_time; /* sum of variances in execution time in msec */

View File

@ -413,7 +413,7 @@ comp_ptrgm(const void *v1, const void *v2)
* ulen1: count of unique trigrams of array "trg1". * ulen1: count of unique trigrams of array "trg1".
* len2: length of array "trg2" and array "trg2indexes". * len2: length of array "trg2" and array "trg2indexes".
* len: length of the array "found". * len: length of the array "found".
* check_only: if true then only check existaince of similar search pattern in * check_only: if true then only check existence of similar search pattern in
* text. * text.
* *
* Returns word similarity. * Returns word similarity.
@ -456,7 +456,7 @@ iterate_word_similarity(int *trg2indexes,
lastpos[trgindex] = i; lastpos[trgindex] = i;
} }
/* Adjust lower bound if this trigram is present in required substing */ /* Adjust lower bound if this trigram is present in required substring */
if (found[trgindex]) if (found[trgindex])
{ {
int prev_lower, int prev_lower,
@ -547,7 +547,7 @@ iterate_word_similarity(int *trg2indexes,
* *
* str1: search pattern string, of length slen1 bytes. * str1: search pattern string, of length slen1 bytes.
* str2: text in which we are looking for a word, of length slen2 bytes. * str2: text in which we are looking for a word, of length slen2 bytes.
* check_only: if true then only check existaince of similar search pattern in * check_only: if true then only check existence of similar search pattern in
* text. * text.
* *
* Returns word similarity. * Returns word similarity.

View File

@ -311,7 +311,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf)
} }
/* /*
* caller wants exatly len bytes and dont bother with references * caller wants exactly len bytes and don't bother with references
*/ */
int int
pullf_read_fixed(PullFilter *src, int len, uint8 *dst) pullf_read_fixed(PullFilter *src, int len, uint8 *dst)

View File

@ -141,7 +141,7 @@ bn_to_mpi(mpz_t *bn)
} }
/* /*
* Decide the number of bits in the random componont k * Decide the number of bits in the random component k
* *
* It should be in the same range as p for signing (which * It should be in the same range as p for signing (which
* is deprecated), but can be much smaller for encrypting. * is deprecated), but can be much smaller for encrypting.
@ -149,8 +149,8 @@ bn_to_mpi(mpz_t *bn)
* Until I research it further, I just mimic gpg behaviour. * Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120, * It has a special mapping table, for values <= 5120,
* above that it uses 'arbitrary high number'. Following * above that it uses 'arbitrary high number'. Following
* algorihm hovers 10-70 bits above gpg values. And for * algorithm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm. * larger p, it uses gpg's algorithm.
* *
* The point is - if k gets large, encryption will be * The point is - if k gets large, encryption will be
* really slow. It does not matter for decryption. * really slow. It does not matter for decryption.

View File

@ -74,7 +74,7 @@ bn_to_mpi(BIGNUM *bn)
} }
/* /*
* Decide the number of bits in the random componont k * Decide the number of bits in the random component k
* *
* It should be in the same range as p for signing (which * It should be in the same range as p for signing (which
* is deprecated), but can be much smaller for encrypting. * is deprecated), but can be much smaller for encrypting.
@ -82,8 +82,8 @@ bn_to_mpi(BIGNUM *bn)
* Until I research it further, I just mimic gpg behaviour. * Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120, * It has a special mapping table, for values <= 5120,
* above that it uses 'arbitrary high number'. Following * above that it uses 'arbitrary high number'. Following
* algorihm hovers 10-70 bits above gpg values. And for * algorithm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm. * larger p, it uses gpg's algorithm.
* *
* The point is - if k gets large, encryption will be * The point is - if k gets large, encryption will be
* really slow. It does not matter for decryption. * really slow. It does not matter for decryption.

View File

@ -2057,7 +2057,7 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
1 1
(10 rows) (10 rows)
-- non-Var items in targelist of the nullable rel of a join preventing -- non-Var items in targetlist of the nullable rel of a join preventing
-- push-down in some cases -- push-down in some cases
-- unable to push {ft1, ft2} -- unable to push {ft1, ft2}
EXPLAIN (VERBOSE, COSTS OFF) EXPLAIN (VERBOSE, COSTS OFF)

View File

@ -493,7 +493,7 @@ EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10; SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10; SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
-- non-Var items in targelist of the nullable rel of a join preventing -- non-Var items in targetlist of the nullable rel of a join preventing
-- push-down in some cases -- push-down in some cases
-- unable to push {ft1, ft2} -- unable to push {ft1, ft2}
EXPLAIN (VERBOSE, COSTS OFF) EXPLAIN (VERBOSE, COSTS OFF)

View File

@ -888,7 +888,7 @@ restore(char *result, float val, int n)
if (Abs(exp) <= 4) if (Abs(exp) <= 4)
{ {
/* /*
* remove the decimal point from the mantyssa and write the digits * remove the decimal point from the mantissa and write the digits
* to the buf array * to the buf array
*/ */
for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++) for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)

View File

@ -23,7 +23,7 @@
* When we ask SELinux whether the required privileges are allowed or not, * When we ask SELinux whether the required privileges are allowed or not,
* we use security_compute_av(3). It needs us to represent object classes * we use security_compute_av(3). It needs us to represent object classes
* and access vectors using 'external' codes defined in the security policy. * and access vectors using 'external' codes defined in the security policy.
* It is determinded in the runtime, not build time. So, it needs an internal * It is determined in the runtime, not build time. So, it needs an internal
* service to translate object class/access vectors which we want to check * service to translate object class/access vectors which we want to check
* into the code which kernel want to be given. * into the code which kernel want to be given.
*/ */

View File

@ -206,7 +206,7 @@ SELECT * FROM auth_tbl; -- failed
SELECT sepgsql_setcon(NULL); -- end of session SELECT sepgsql_setcon(NULL); -- end of session
SELECT sepgsql_getcon(); SELECT sepgsql_getcon();
-- the pooler cannot touch these tables directry -- the pooler cannot touch these tables directly
SELECT * FROM foo_tbl; -- failed SELECT * FROM foo_tbl; -- failed
SELECT * FROM var_tbl; -- failed SELECT * FROM var_tbl; -- failed

View File

@ -89,7 +89,7 @@ check_primary_key(PG_FUNCTION_ARGS)
/* internal error */ /* internal error */
elog(ERROR, "check_primary_key: cannot process DELETE events"); elog(ERROR, "check_primary_key: cannot process DELETE events");
/* If UPDATion the must check new Tuple, not old one */ /* If UPDATE, then must check new Tuple, not old one */
else else
tuple = trigdata->tg_newtuple; tuple = trigdata->tg_newtuple;

View File

@ -29,7 +29,7 @@
# modified by Ray Aspeitia 12-03-2003 : # modified by Ray Aspeitia 12-03-2003 :
# added log rotation script to db startup # added log rotation script to db startup
# modified StartupParameters.plist "Provides" parameter to make it easier to # modified StartupParameters.plist "Provides" parameter to make it easier to
# start and stop with the SystemStarter utitlity # start and stop with the SystemStarter utility
# use the below command in order to correctly start/stop/restart PG with log rotation script: # use the below command in order to correctly start/stop/restart PG with log rotation script:
# SystemStarter [start|stop|restart] PostgreSQL # SystemStarter [start|stop|restart] PostgreSQL

View File

@ -414,7 +414,7 @@ CREATE FUNCTION stat(text,text)
LANGUAGE INTERNAL LANGUAGE INTERNAL
RETURNS NULL ON NULL INPUT; RETURNS NULL ON NULL INPUT;
--reset - just for debuging --reset - just for debugging
CREATE FUNCTION reset_tsearch() CREATE FUNCTION reset_tsearch()
RETURNS void RETURNS void
as 'MODULE_PATHNAME', 'tsa_reset_tsearch' as 'MODULE_PATHNAME', 'tsa_reset_tsearch'

View File

@ -610,7 +610,7 @@ xpath_table(PG_FUNCTION_ARGS)
/* /*
* At the moment we assume that the returned attributes make sense for the * At the moment we assume that the returned attributes make sense for the
* XPath specififed (i.e. we trust the caller). It's not fatal if they get * XPath specified (i.e. we trust the caller). It's not fatal if they get
* it wrong - the input function for the column type will raise an error * it wrong - the input function for the column type will raise an error
* if the path result can't be converted into the correct binary * if the path result can't be converted into the correct binary
* representation. * representation.

View File

@ -377,7 +377,7 @@ $(shlib): $(OBJS) $(DLL_DEFFILE) | $(SHLIB_PREREQS)
$(CC) $(CFLAGS) -shared -static-libgcc -o $@ $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib) $(CC) $(CFLAGS) -shared -static-libgcc -o $@ $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib)
endif endif
endif # PORTNAME == cgywin endif # PORTNAME == cygwin
endif # PORTNAME == cygwin || PORTNAME == win32 endif # PORTNAME == cygwin || PORTNAME == win32

View File

@ -28,7 +28,7 @@ The current implementation of GiST supports:
The support for concurrency implemented in PostgreSQL was developed based on The support for concurrency implemented in PostgreSQL was developed based on
the paper "Access Methods for Next-Generation Database Systems" by the paper "Access Methods for Next-Generation Database Systems" by
Marcel Kornaker: Marcel Kornacker:
http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz

View File

@ -1077,7 +1077,7 @@ _hash_splitbucket_guts(Relation rel,
* already moved before the split operation was previously interrupted. * already moved before the split operation was previously interrupted.
* *
* The caller must hold a pin, but no lock, on the metapage and old bucket's * The caller must hold a pin, but no lock, on the metapage and old bucket's
* primay page buffer. The buffers are returned in the same state. (The * primary page buffer. The buffers are returned in the same state. (The
* metapage is only touched if it becomes necessary to add or remove overflow * metapage is only touched if it becomes necessary to add or remove overflow
* pages.) * pages.)
*/ */

View File

@ -209,7 +209,7 @@ typedef struct RewriteMappingFile
} RewriteMappingFile; } RewriteMappingFile;
/* /*
* A single In-Memeory logical rewrite mapping, hanging of * A single In-Memory logical rewrite mapping, hanging off
* RewriteMappingFile->mappings. * RewriteMappingFile->mappings.
*/ */
typedef struct RewriteMappingDataEntry typedef struct RewriteMappingDataEntry

View File

@ -615,7 +615,7 @@ CommitTsParameterChange(bool newvalue, bool oldvalue)
/* /*
* Activate this module whenever necessary. * Activate this module whenever necessary.
* This must happen during postmaster or standalong-backend startup, * This must happen during postmaster or standalone-backend startup,
* or during WAL replay anytime the track_commit_timestamp setting is * or during WAL replay anytime the track_commit_timestamp setting is
* changed in the master. * changed in the master.
* *

View File

@ -2752,7 +2752,7 @@ CommitTransactionCommand(void)
* These shouldn't happen. TBLOCK_DEFAULT means the previous * These shouldn't happen. TBLOCK_DEFAULT means the previous
* StartTransactionCommand didn't set the STARTED state * StartTransactionCommand didn't set the STARTED state
* appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended * appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended
* by EndParallelWorkerTranaction(), not this function. * by EndParallelWorkerTransaction(), not this function.
*/ */
case TBLOCK_DEFAULT: case TBLOCK_DEFAULT:
case TBLOCK_PARALLEL_INPROGRESS: case TBLOCK_PARALLEL_INPROGRESS:

View File

@ -770,7 +770,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid, List **objname);
* *
* Note: If the object is not found, we don't give any indication of the * Note: If the object is not found, we don't give any indication of the
* reason. (It might have been a missing schema if the name was qualified, or * reason. (It might have been a missing schema if the name was qualified, or
* an inexistant type name in case of a cast, function or operator; etc). * a nonexistent type name in case of a cast, function or operator; etc).
* Currently there is only one caller that might be interested in such info, so * Currently there is only one caller that might be interested in such info, so
* we don't spend much effort here. If more callers start to care, it might be * we don't spend much effort here. If more callers start to care, it might be
* better to add some support for that in this function. * better to add some support for that in this function.

View File

@ -34,7 +34,7 @@ static const char *get_am_type_string(char amtype);
/* /*
* CreateAcessMethod * CreateAccessMethod
* Registers a new access method. * Registers a new access method.
*/ */
ObjectAddress ObjectAddress

View File

@ -685,7 +685,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
/* /*
* Force synchronous commit, thus minimizing the window between * Force synchronous commit, thus minimizing the window between
* creation of the database files and commital of the transaction. If * creation of the database files and committal of the transaction. If
* we crash before committing, we'll have a DB that's taking up disk * we crash before committing, we'll have a DB that's taking up disk
* space but is not in pg_database, which is not good. * space but is not in pg_database, which is not good.
*/ */
@ -955,7 +955,7 @@ dropdb(const char *dbname, bool missing_ok)
/* /*
* Force synchronous commit, thus minimizing the window between removal of * Force synchronous commit, thus minimizing the window between removal of
* the database files and commital of the transaction. If we crash before * the database files and committal of the transaction. If we crash before
* committing, we'll have a DB that's gone on disk but still there * committing, we'll have a DB that's gone on disk but still there
* according to pg_database, which is not good. * according to pg_database, which is not good.
*/ */
@ -1309,7 +1309,7 @@ movedb(const char *dbname, const char *tblspcname)
/* /*
* Force synchronous commit, thus minimizing the window between * Force synchronous commit, thus minimizing the window between
* copying the database files and commital of the transaction. If we * copying the database files and committal of the transaction. If we
* crash before committing, we'll leave an orphaned set of files on * crash before committing, we'll leave an orphaned set of files on
* disk, which is not fatal but not good either. * disk, which is not fatal but not good either.
*/ */

View File

@ -3401,7 +3401,7 @@ ExplainYAMLLineStarting(ExplainState *es)
} }
/* /*
* YAML is a superset of JSON; unfortuantely, the YAML quoting rules are * YAML is a superset of JSON; unfortunately, the YAML quoting rules are
* ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of * ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of
* http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything. * http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything.
* Empty strings, strings with leading or trailing whitespace, and strings * Empty strings, strings with leading or trailing whitespace, and strings

View File

@ -1040,7 +1040,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt)
} }
else else
{ {
/* store SQL NULL instead of emtpy array */ /* store SQL NULL instead of empty array */
trftypes = NULL; trftypes = NULL;
} }
@ -1441,7 +1441,7 @@ CreateCast(CreateCastStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cast will be ignored because the target data type is a domain"))); errmsg("cast will be ignored because the target data type is a domain")));
/* Detemine the cast method */ /* Determine the cast method */
if (stmt->func != NULL) if (stmt->func != NULL)
castmethod = COERCION_METHOD_FUNCTION; castmethod = COERCION_METHOD_FUNCTION;
else if (stmt->inout) else if (stmt->inout)

View File

@ -99,7 +99,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
* Errors arising from the attribute list still apply. * Errors arising from the attribute list still apply.
* *
* Most column type changes that can skip a table rewrite do not invalidate * Most column type changes that can skip a table rewrite do not invalidate
* indexes. We ackowledge this when all operator classes, collations and * indexes. We acknowledge this when all operator classes, collations and
* exclusion operators match. Though we could further permit intra-opfamily * exclusion operators match. Though we could further permit intra-opfamily
* changes for btree and hash indexes, that adds subtle complexity with no * changes for btree and hash indexes, that adds subtle complexity with no
* concrete benefit for core types. * concrete benefit for core types.
@ -965,7 +965,7 @@ CheckMutability(Expr *expr)
* indxpath.c could do something with. However, that seems overly * indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply * restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario * a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluatable predicate will work. So accept any predicate here * any evaluable predicate will work. So accept any predicate here
* (except ones requiring a plan), and let indxpath.c fend for itself. * (except ones requiring a plan), and let indxpath.c fend for itself.
*/ */
static void static void

View File

@ -525,7 +525,7 @@ OpenTableList(List *tables)
myrelid = RelationGetRelid(rel); myrelid = RelationGetRelid(rel);
/* /*
* filter out duplicates when user specifies "foo, foo" * filter out duplicates when user specifies "foo, foo"
* Note that this algrithm is know to not be very effective (O(N^2)) * Note that this algorithm is know to not be very effective (O(N^2))
* but given that it only works on list of tables given to us by user * but given that it only works on list of tables given to us by user
* it's deemed acceptable. * it's deemed acceptable.
*/ */

View File

@ -474,7 +474,7 @@ DropSubscription(DropSubscriptionStmt *stmt)
InvokeObjectDropHook(SubscriptionRelationId, subid, 0); InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
/* /*
* Lock the subscription so noboby else can do anything with it * Lock the subscription so nobody else can do anything with it
* (including the replication workers). * (including the replication workers).
*/ */
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);

View File

@ -6630,7 +6630,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/* /*
* Check if ONLY was specified with ALTER TABLE. If so, allow the * Check if ONLY was specified with ALTER TABLE. If so, allow the
* contraint creation only if there are no children currently. Error out * constraint creation only if there are no children currently. Error out
* otherwise. * otherwise.
*/ */
if (!recurse && children != NIL) if (!recurse && children != NIL)

View File

@ -1261,7 +1261,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_projectReturning = NULL; resultRelInfo->ri_projectReturning = NULL;
/* /*
* If partition_root has been specified, that means we are builiding the * If partition_root has been specified, that means we are building the
* ResultRelationInfo for one of its leaf partitions. In that case, we * ResultRelationInfo for one of its leaf partitions. In that case, we
* need *not* initialize the leaf partition's constraint, but rather the * need *not* initialize the leaf partition's constraint, but rather the
* the partition_root's (if any). We must do that explicitly like this, * the partition_root's (if any). We must do that explicitly like this,

View File

@ -533,7 +533,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
int plan_node_id = planstate->plan->plan_node_id; int plan_node_id = planstate->plan->plan_node_id;
MemoryContext oldcontext; MemoryContext oldcontext;
/* Find the instumentation for this node. */ /* Find the instrumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i) for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id) if (instrumentation->plan_node_id[i] == plan_node_id)
break; break;

View File

@ -391,7 +391,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
if (rel->rd_att->constr) if (rel->rd_att->constr)
ExecConstraints(resultRelInfo, slot, slot, estate); ExecConstraints(resultRelInfo, slot, slot, estate);
/* Store the slot into tuple that we can insett. */ /* Store the slot into tuple that we can inspect. */
tuple = ExecMaterializeSlot(slot); tuple = ExecMaterializeSlot(slot);
/* OK, store the tuple and create index entries for it */ /* OK, store the tuple and create index entries for it */

View File

@ -304,7 +304,7 @@ typedef struct AggStatePerTransData
/* /*
* Slots for holding the evaluated input arguments. These are set up * Slots for holding the evaluated input arguments. These are set up
* during ExecInitAgg() and then used for each input row requiring * during ExecInitAgg() and then used for each input row requiring
* procesessing besides what's done in AggState->evalproj. * processing besides what's done in AggState->evalproj.
*/ */
TupleTableSlot *sortslot; /* current input tuple */ TupleTableSlot *sortslot; /* current input tuple */
TupleTableSlot *uniqslot; /* used for multi-column DISTINCT */ TupleTableSlot *uniqslot; /* used for multi-column DISTINCT */

View File

@ -354,7 +354,7 @@ advance_windowaggregate(WindowAggState *winstate,
/* /*
* We must track the number of rows included in transValue, since to * We must track the number of rows included in transValue, since to
* remove the last input, advance_windowaggregate_base() musn't call the * remove the last input, advance_windowaggregate_base() mustn't call the
* inverse transition function, but simply reset transValue back to its * inverse transition function, but simply reset transValue back to its
* initial value. * initial value.
*/ */

View File

@ -109,7 +109,7 @@ static MemoryContext parsed_hba_context = NULL;
* *
* NOTE: the IdentLine structs can contain pre-compiled regular expressions * NOTE: the IdentLine structs can contain pre-compiled regular expressions
* that live outside the memory context. Before destroying or resetting the * that live outside the memory context. Before destroying or resetting the
* memory context, they need to be expliticly free'd. * memory context, they need to be explicitly free'd.
*/ */
static List *parsed_ident_lines = NIL; static List *parsed_ident_lines = NIL;
static MemoryContext parsed_ident_context = NULL; static MemoryContext parsed_ident_context = NULL;

View File

@ -111,7 +111,7 @@ gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2,
for (index1 = 0; index1 < num_gene; index1++) for (index1 = 0; index1 < num_gene; index1++)
{ {
/* /*
* presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
* maps n back to 1 * maps n back to 1
*/ */
@ -314,7 +314,7 @@ gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table)
/* /*
* give priority to candidates with fewest remaining unused edges; * give priority to candidates with fewest remaining unused edges;
* find out what the minimum number of unused edges is * find out what the minimum number of unused edges is
* (minimum_edges); if there is more than one cadidate with the * (minimum_edges); if there is more than one candidate with the
* minimum number of unused edges keep count of this number * minimum number of unused edges keep count of this number
* (minimum_count); * (minimum_count);
*/ */

View File

@ -1618,7 +1618,7 @@ select_mergejoin_clauses(PlannerInfo *root,
/* /*
* Insist that each side have a non-redundant eclass. This * Insist that each side have a non-redundant eclass. This
* restriction is needed because various bits of the planner expect * restriction is needed because various bits of the planner expect
* that each clause in a merge be associatable with some pathkey in a * that each clause in a merge be associable with some pathkey in a
* canonical pathkey list, but redundant eclasses can't appear in * canonical pathkey list, but redundant eclasses can't appear in
* canonical sort orderings. (XXX it might be worth relaxing this, * canonical sort orderings. (XXX it might be worth relaxing this,
* but not enough time to address it for 8.3.) * but not enough time to address it for 8.3.)

View File

@ -195,7 +195,7 @@ query_planner(PlannerInfo *root, List *tlist,
/* /*
* Now distribute "placeholders" to base rels as needed. This has to be * Now distribute "placeholders" to base rels as needed. This has to be
* done after join removal because removal could change whether a * done after join removal because removal could change whether a
* placeholder is evaluatable at a base rel. * placeholder is evaluable at a base rel.
*/ */
add_placeholders_to_base_rels(root); add_placeholders_to_base_rels(root);

View File

@ -24,7 +24,7 @@
* Detect whether there is a joinclause that involves * Detect whether there is a joinclause that involves
* the two given relations. * the two given relations.
* *
* Note: the joinclause does not have to be evaluatable with only these two * Note: the joinclause does not have to be evaluable with only these two
* relations. This is intentional. For example consider * relations. This is intentional. For example consider
* SELECT * FROM a, b, c WHERE a.x = (b.y + c.z) * SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
* If a is much larger than the other tables, it may be worthwhile to * If a is much larger than the other tables, it may be worthwhile to

View File

@ -515,7 +515,7 @@ join_clause_is_movable_into(RestrictInfo *rinfo,
Relids currentrelids, Relids currentrelids,
Relids current_and_outer) Relids current_and_outer)
{ {
/* Clause must be evaluatable given available context */ /* Clause must be evaluable given available context */
if (!bms_is_subset(rinfo->clause_relids, current_and_outer)) if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
return false; return false;

View File

@ -11312,7 +11312,7 @@ table_ref: relation_expr opt_alias_clause
n->lateral = true; n->lateral = true;
n->subquery = $2; n->subquery = $2;
n->alias = $3; n->alias = $3;
/* same coment as above */ /* same comment as above */
if ($3 == NULL) if ($3 == NULL)
{ {
if (IsA($2, SelectStmt) && if (IsA($2, SelectStmt) &&

View File

@ -3050,7 +3050,7 @@ transformAttachPartition(CreateStmtContext *cxt, PartitionCmd *cmd)
errmsg("\"%s\" is not partitioned", errmsg("\"%s\" is not partitioned",
RelationGetRelationName(parentRel)))); RelationGetRelationName(parentRel))));
/* tranform the values */ /* transform the values */
Assert(RelationGetPartitionKey(parentRel) != NULL); Assert(RelationGetPartitionKey(parentRel) != NULL);
cxt->partbound = transformPartitionBound(cxt->pstate, parentRel, cxt->partbound = transformPartitionBound(cxt->pstate, parentRel,
cmd->bound); cmd->bound);

View File

@ -211,7 +211,7 @@ BackgroundWriterMain(void)
/* Flush any leaked data in the top-level context */ /* Flush any leaked data in the top-level context */
MemoryContextResetAndDeleteChildren(bgwriter_context); MemoryContextResetAndDeleteChildren(bgwriter_context);
/* re-initilialize to avoid repeated errors causing problems */ /* re-initialize to avoid repeated errors causing problems */
WritebackContextInit(&wb_context, &bgwriter_flush_after); WritebackContextInit(&wb_context, &bgwriter_flush_after);
/* Now we can allow interrupts again */ /* Now we can allow interrupts again */

View File

@ -5156,7 +5156,7 @@ RandomCancelKey(int32 *cancel_key)
} }
/* /*
* Count up number of child processes of specified types (dead_end chidren * Count up number of child processes of specified types (dead_end children
* are always excluded). * are always excluded).
*/ */
static int static int

View File

@ -170,7 +170,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
/* /*
* Worker started and attached to our shmem. This check is safe * Worker started and attached to our shmem. This check is safe
* because only laucher ever starts the workers, so nobody can steal * because only launcher ever starts the workers, so nobody can steal
* the worker slot. * the worker slot.
*/ */
if (status == BGWH_STARTED && worker->proc) if (status == BGWH_STARTED && worker->proc)
@ -180,7 +180,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
return false; return false;
/* /*
* We need timeout because we generaly don't get notified via latch * We need timeout because we generally don't get notified via latch
* about the worker attach. * about the worker attach.
*/ */
rc = WaitLatch(MyLatch, rc = WaitLatch(MyLatch,
@ -533,7 +533,7 @@ AtCommit_ApplyLauncher(void)
/* /*
* Request wakeup of the launcher on commit of the transaction. * Request wakeup of the launcher on commit of the transaction.
* *
* This is used to send launcher signal to stop sleeping and proccess the * This is used to send launcher signal to stop sleeping and process the
* subscriptions when current transaction commits. Should be used when new * subscriptions when current transaction commits. Should be used when new
* tuple was added to the pg_subscription catalog. * tuple was added to the pg_subscription catalog.
*/ */
@ -638,7 +638,7 @@ ApplyLauncherMain(Datum main_arg)
else else
{ {
/* /*
* The wait in previous cycle was interruped in less than * The wait in previous cycle was interrupted in less than
* wal_retrieve_retry_interval since last worker was started, * wal_retrieve_retry_interval since last worker was started,
* this usually means crash of the worker, so we should retry * this usually means crash of the worker, so we should retry
* in wal_retrieve_retry_interval again. * in wal_retrieve_retry_interval again.

View File

@ -1250,7 +1250,7 @@ pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS)
* Return the replication progress for origin setup in the current session. * Return the replication progress for origin setup in the current session.
* *
* If 'flush' is set to true it is ensured that the returned value corresponds * If 'flush' is set to true it is ensured that the returned value corresponds
* to a local transaction that has been flushed. this is useful if asychronous * to a local transaction that has been flushed. this is useful if asynchronous
* commits are used when replaying replicated transactions. * commits are used when replaying replicated transactions.
*/ */
Datum Datum
@ -1336,7 +1336,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
* Return the replication progress for an individual replication origin. * Return the replication progress for an individual replication origin.
* *
* If 'flush' is set to true it is ensured that the returned value corresponds * If 'flush' is set to true it is ensured that the returned value corresponds
* to a local transaction that has been flushed. this is useful if asychronous * to a local transaction that has been flushed. this is useful if asynchronous
* commits are used when replaying replicated transactions. * commits are used when replaying replicated transactions.
*/ */
Datum Datum

View File

@ -539,7 +539,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)
if (att->attisdropped) if (att->attisdropped)
continue; continue;
/* REPLICA IDENTITY FULL means all colums are sent as part of key. */ /* REPLICA IDENTITY FULL means all columns are sent as part of key. */
if (replidentfull || if (replidentfull ||
bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
idattrs)) idattrs))

View File

@ -1714,7 +1714,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
* *
* NB: Transactions handled here have to have actively aborted (i.e. have * NB: Transactions handled here have to have actively aborted (i.e. have
* produced an abort record). Implicitly aborted transactions are handled via * produced an abort record). Implicitly aborted transactions are handled via
* ReorderBufferAbortOld(); transactions we're just not interesteded in, but * ReorderBufferAbortOld(); transactions we're just not interested in, but
* which have committed are handled in ReorderBufferForget(). * which have committed are handled in ReorderBufferForget().
* *
* This function purges this transaction and its contents from memory and * This function purges this transaction and its contents from memory and
@ -1782,7 +1782,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
* toplevel xid. * toplevel xid.
* *
* This is significantly different to ReorderBufferAbort() because * This is significantly different to ReorderBufferAbort() because
* transactions that have committed need to be treated differenly from aborted * transactions that have committed need to be treated differently from aborted
* ones since they may have modified the catalog. * ones since they may have modified the catalog.
* *
* Note that this is only allowed to be called in the moment a transaction * Note that this is only allowed to be called in the moment a transaction
@ -2660,7 +2660,7 @@ StartupReorderBuffer(void)
/* /*
* ok, has to be a surviving logical slot, iterate and delete * ok, has to be a surviving logical slot, iterate and delete
* everythign starting with xid-* * everything starting with xid-*
*/ */
sprintf(path, "pg_replslot/%s", logical_de->d_name); sprintf(path, "pg_replslot/%s", logical_de->d_name);

View File

@ -614,7 +614,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid)
if (builder->snapshot == NULL) if (builder->snapshot == NULL)
{ {
builder->snapshot = SnapBuildBuildSnapshot(builder, xid); builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
/* inrease refcount for the snapshot builder */ /* increase refcount for the snapshot builder */
SnapBuildSnapIncRefcount(builder->snapshot); SnapBuildSnapIncRefcount(builder->snapshot);
} }
@ -678,7 +678,7 @@ SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
if (builder->snapshot == NULL) if (builder->snapshot == NULL)
{ {
builder->snapshot = SnapBuildBuildSnapshot(builder, xid); builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
/* inrease refcount for the snapshot builder */ /* increase refcount for the snapshot builder */
SnapBuildSnapIncRefcount(builder->snapshot); SnapBuildSnapIncRefcount(builder->snapshot);
} }
@ -911,7 +911,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
{ {
/* /*
* None of the originally running transaction is running anymore, * None of the originally running transaction is running anymore,
* so our incrementaly built snapshot now is consistent. * so our incrementally built snapshot now is consistent.
*/ */
ereport(LOG, ereport(LOG,
(errmsg("logical decoding found consistent point at %X/%X", (errmsg("logical decoding found consistent point at %X/%X",

View File

@ -327,7 +327,7 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
/* /*
* Modify slot with user data provided as C strigs. * Modify slot with user data provided as C strigs.
* This is somewhat similar to heap_modify_tuple but also calls the type * This is somewhat similar to heap_modify_tuple but also calls the type
* input fuction on the user data as the input is the text representation * input function on the user data as the input is the text representation
* of the types. * of the types.
*/ */
static void static void

View File

@ -172,7 +172,7 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
&data->protocol_version, &data->protocol_version,
&data->publication_names); &data->publication_names);
/* Check if we support requested protol */ /* Check if we support requested protocol */
if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM) if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@ -424,7 +424,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
/* /*
* Initialize the relation schema sync cache for a decoding session. * Initialize the relation schema sync cache for a decoding session.
* *
* The hash table is destoyed at the end of a decoding session. While * The hash table is destroyed at the end of a decoding session. While
* relcache invalidations still exist and will still be invoked, they * relcache invalidations still exist and will still be invoked, they
* will just see the null hash table global and take no action. * will just see the null hash table global and take no action.
*/ */
@ -540,7 +540,7 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
/* /*
* We can get here if the plugin was used in SQL interface as the * We can get here if the plugin was used in SQL interface as the
* RelSchemaSyncCache is detroyed when the decoding finishes, but there * RelSchemaSyncCache is destroyed when the decoding finishes, but there
* is no way to unregister the relcache invalidation callback. * is no way to unregister the relcache invalidation callback.
*/ */
if (RelationSyncCache == NULL) if (RelationSyncCache == NULL)
@ -580,7 +580,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
/* /*
* We can get here if the plugin was used in SQL interface as the * We can get here if the plugin was used in SQL interface as the
* RelSchemaSyncCache is detroyed when the decoding finishes, but there * RelSchemaSyncCache is destroyed when the decoding finishes, but there
* is no way to unregister the relcache invalidation callback. * is no way to unregister the relcache invalidation callback.
*/ */
if (RelationSyncCache == NULL) if (RelationSyncCache == NULL)

View File

@ -860,7 +860,7 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
* reached. At most nevents occurred events are returned. * reached. At most nevents occurred events are returned.
* *
* If timeout = -1, block until an event occurs; if 0, check sockets for * If timeout = -1, block until an event occurs; if 0, check sockets for
* readiness, but don't block; if > 0, block for at most timeout miliseconds. * readiness, but don't block; if > 0, block for at most timeout milliseconds.
* *
* Returns the number of events occurred, or 0 if the timeout was reached. * Returns the number of events occurred, or 0 if the timeout was reached.
* *

View File

@ -501,7 +501,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
* it will point to a temporary buffer. This mostly avoids data copying in * it will point to a temporary buffer. This mostly avoids data copying in
* the hoped-for case where messages are short compared to the buffer size, * the hoped-for case where messages are short compared to the buffer size,
* while still allowing longer messages. In either case, the return value * while still allowing longer messages. In either case, the return value
* remains valid until the next receive operation is perfomed on the queue. * remains valid until the next receive operation is performed on the queue.
* *
* When nowait = false, we'll wait on our process latch when the ring buffer * When nowait = false, we'll wait on our process latch when the ring buffer
* is empty and we have not yet received a full message. The sender will * is empty and we have not yet received a full message. The sender will

View File

@ -967,7 +967,7 @@ LogStandbySnapshot(void)
* similar. We keep them separate because xl_xact_running_xacts is a * similar. We keep them separate because xl_xact_running_xacts is a
* contiguous chunk of memory and never exists fully until it is assembled in * contiguous chunk of memory and never exists fully until it is assembled in
* WAL. The inserted records are marked as not being important for durability, * WAL. The inserted records are marked as not being important for durability,
* to avoid triggering superflous checkpoint / archiving activity. * to avoid triggering superfluous checkpoint / archiving activity.
*/ */
static XLogRecPtr static XLogRecPtr
LogCurrentRunningXacts(RunningTransactions CurrRunningXacts) LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)

View File

@ -2778,7 +2778,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
vxids = (VirtualTransactionId *) vxids = (VirtualTransactionId *)
palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1)); palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
/* Compute hash code and partiton lock, and look up conflicting modes. */ /* Compute hash code and partition lock, and look up conflicting modes. */
hashcode = LockTagHashCode(locktag); hashcode = LockTagHashCode(locktag);
partitionLock = LockHashPartitionLock(hashcode); partitionLock = LockHashPartitionLock(hashcode);
conflictMask = lockMethodTable->conflictTab[lockmode]; conflictMask = lockMethodTable->conflictTab[lockmode];

View File

@ -781,7 +781,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
return false; return false;
} }
else else
return true; /* someobdy else has the lock */ return true; /* somebody else has the lock */
} }
} }
pg_unreachable(); pg_unreachable();
@ -953,7 +953,7 @@ LWLockWakeup(LWLock *lock)
* that happens before the list unlink happens, the list would end up * that happens before the list unlink happens, the list would end up
* being corrupted. * being corrupted.
* *
* The barrier pairs with the LWLockWaitListLock() when enqueueing for * The barrier pairs with the LWLockWaitListLock() when enqueuing for
* another lock. * another lock.
*/ */
pg_write_barrier(); pg_write_barrier();
@ -1029,7 +1029,7 @@ LWLockDequeueSelf(LWLock *lock)
/* /*
* Can't just remove ourselves from the list, but we need to iterate over * Can't just remove ourselves from the list, but we need to iterate over
* all entries as somebody else could have unqueued us. * all entries as somebody else could have dequeued us.
*/ */
proclist_foreach_modify(iter, &lock->waiters, lwWaitLink) proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
{ {

View File

@ -3193,7 +3193,7 @@ ReleasePredicateLocks(bool isCommit)
/* /*
* We can't trust XactReadOnly here, because a transaction which started * We can't trust XactReadOnly here, because a transaction which started
* as READ WRITE can show as READ ONLY later, e.g., within * as READ WRITE can show as READ ONLY later, e.g., within
* substransactions. We want to flag a transaction as READ ONLY if it * subtransactions. We want to flag a transaction as READ ONLY if it
* commits without writing so that de facto READ ONLY transactions get the * commits without writing so that de facto READ ONLY transactions get the
* benefit of some RO optimizations, so we will use this local variable to * benefit of some RO optimizations, so we will use this local variable to
* get some cleanup logic right which is based on whether the transaction * get some cleanup logic right which is based on whether the transaction

View File

@ -1728,7 +1728,7 @@ _fdvec_resize(SMgrRelation reln,
else else
{ {
/* /*
* It doesn't seem worthwile complicating the code by having a more * It doesn't seem worthwhile complicating the code by having a more
* aggressive growth strategy here; the number of segments doesn't * aggressive growth strategy here; the number of segments doesn't
* grow that fast, and the memory context internally will sometimes * grow that fast, and the memory context internally will sometimes
* avoid doing an actual reallocation. * avoid doing an actual reallocation.

View File

@ -37,7 +37,7 @@
* Spell field. The AffixData field is initialized if AF parameter is not * Spell field. The AffixData field is initialized if AF parameter is not
* defined. * defined.
* - NISortAffixes(): * - NISortAffixes():
* - builds a list of compond affixes from the affix list and stores it * - builds a list of compound affixes from the affix list and stores it
* in the CompoundAffix. * in the CompoundAffix.
* - builds prefix trees (Trie) from the affix list for prefixes and suffixes * - builds prefix trees (Trie) from the affix list for prefixes and suffixes
* and stores them in Suffix and Prefix fields. * and stores them in Suffix and Prefix fields.

View File

@ -179,7 +179,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
if (ld->curDictId == InvalidOid) if (ld->curDictId == InvalidOid)
{ {
/* /*
* usial mode: dictionary wants only one word, but we should keep in * usual mode: dictionary wants only one word, but we should keep in
* mind that we should go through all stack * mind that we should go through all stack
*/ */
@ -272,7 +272,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
/* /*
* We should be sure that current type of lexeme is recognized * We should be sure that current type of lexeme is recognized
* by our dictinonary: we just check is it exist in list of * by our dictionary: we just check is it exist in list of
* dictionaries ? * dictionaries ?
*/ */
for (i = 0; i < map->len && !dictExists; i++) for (i = 0; i < map->len && !dictExists; i++)
@ -627,7 +627,7 @@ generateHeadline(HeadlineParsedText *prs)
/* start of a new fragment */ /* start of a new fragment */
infrag = 1; infrag = 1;
numfragments++; numfragments++;
/* add a fragment delimitor if this is after the first one */ /* add a fragment delimiter if this is after the first one */
if (numfragments > 1) if (numfragments > 1)
{ {
memcpy(ptr, prs->fragdelim, prs->fragdelimlen); memcpy(ptr, prs->fragdelim, prs->fragdelimlen);

View File

@ -2445,7 +2445,7 @@ mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
break; break;
} }
if (curlen < min_words && i >= prs->curwords) if (curlen < min_words && i >= prs->curwords)
{ /* got end of text and our cover is shoter { /* got end of text and our cover is shorter
* than min_words */ * than min_words */
for (i = p - 1; i >= 0; i--) for (i = p - 1; i >= 0; i--)
{ {

View File

@ -2265,7 +2265,7 @@ seq_search(char *name, const char *const * array, int type, int max, int *len)
for (last = 0, a = array; *a != NULL; a++) for (last = 0, a = array; *a != NULL; a++)
{ {
/* comperate first chars */ /* compare first chars */
if (*name != **a) if (*name != **a)
continue; continue;

View File

@ -533,7 +533,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
{ {
/* /*
* Lower bound no longer matters. Just estimate the fraction * Lower bound no longer matters. Just estimate the fraction
* with an upper bound <= const uppert bound * with an upper bound <= const upper bound
*/ */
hist_selec = hist_selec =
calc_hist_selectivity_scalar(typcache, &const_upper, calc_hist_selectivity_scalar(typcache, &const_upper,

View File

@ -2687,7 +2687,7 @@ is_input_argument(int nth, const char *argmodes)
} }
/* /*
* Append used transformated types to specified buffer * Append used transformed types to specified buffer
*/ */
static void static void
print_function_trftypes(StringInfo buf, HeapTuple proctup) print_function_trftypes(StringInfo buf, HeapTuple proctup)

View File

@ -899,7 +899,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
/* /*
* if doc are big enough then ext.q may be equal to ext.p due to limit * if doc are big enough then ext.q may be equal to ext.p due to limit
* of posional information. In this case we approximate number of * of positional information. In this case we approximate number of
* noise word as half cover's length * noise word as half cover's length
*/ */
nNoise = (ext.q - ext.p) - (ext.end - ext.begin); nNoise = (ext.q - ext.p) - (ext.end - ext.begin);
@ -908,7 +908,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
Wdoc += Cpos / ((double) (1 + nNoise)); Wdoc += Cpos / ((double) (1 + nNoise));
CurExtPos = ((double) (ext.q + ext.p)) / 2.0; CurExtPos = ((double) (ext.q + ext.p)) / 2.0;
if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent devision by if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent division by
* zero in a case of * zero in a case of
multiple lexize */ ) multiple lexize */ )
SumDist += 1.0 / (CurExtPos - PrevExtPos); SumDist += 1.0 / (CurExtPos - PrevExtPos);

View File

@ -342,7 +342,7 @@ window_lag(PG_FUNCTION_ARGS)
/* /*
* lag_with_offset * lag_with_offset
* returns the value of VE evelulated on a row that is OFFSET * returns the value of VE evaluated on a row that is OFFSET
* rows before the current row within a partition, * rows before the current row within a partition,
* per spec. * per spec.
*/ */

View File

@ -1433,7 +1433,7 @@ RelationInitPhysicalAddr(Relation relation)
* points to the current file since the older file will be gone (or * points to the current file since the older file will be gone (or
* truncated). The new file will still contain older rows so lookups * truncated). The new file will still contain older rows so lookups
* in them will work correctly. This wouldn't work correctly if * in them will work correctly. This wouldn't work correctly if
* rewrites were allowed to change the schema in a noncompatible way, * rewrites were allowed to change the schema in an incompatible way,
* but those are prevented both on catalog tables and on user tables * but those are prevented both on catalog tables and on user tables
* declared as additional catalog tables. * declared as additional catalog tables.
*/ */

View File

@ -879,7 +879,7 @@ get_func_arg_info(HeapTuple procTup,
/* /*
* get_func_trftypes * get_func_trftypes
* *
* Returns a number of transformated types used by function. * Returns the number of transformed types used by function.
*/ */
int int
get_func_trftypes(HeapTuple procTup, get_func_trftypes(HeapTuple procTup,

View File

@ -1108,7 +1108,7 @@ process_settings(Oid databaseid, Oid roleid)
relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock); relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
/* read all the settings under the same snapsot for efficiency */ /* read all the settings under the same snapshot for efficiency */
snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId)); snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId));
/* Later settings are ignored if set earlier. */ /* Later settings are ignored if set earlier. */

View File

@ -19,7 +19,7 @@ OBJS = backend_random.o guc.o help_config.o pg_config.o pg_controldata.o \
tzparser.o tzparser.o
# This location might depend on the installation directories. Therefore # This location might depend on the installation directories. Therefore
# we can't subsitute it into pg_config.h. # we can't substitute it into pg_config.h.
ifdef krb_srvtab ifdef krb_srvtab
override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"' override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"'
endif endif

View File

@ -318,7 +318,7 @@ sum_free_pages(FreePageManager *fpm)
/* /*
* Compute the size of the largest run of pages that the user could * Compute the size of the largest run of pages that the user could
* succesfully get. * successfully get.
*/ */
static Size static Size
FreePageManagerLargestContiguous(FreePageManager *fpm) FreePageManagerLargestContiguous(FreePageManager *fpm)
@ -360,7 +360,7 @@ FreePageManagerLargestContiguous(FreePageManager *fpm)
/* /*
* Recompute the size of the largest run of pages that the user could * Recompute the size of the largest run of pages that the user could
* succesfully get, if it has been marked dirty. * successfully get, if it has been marked dirty.
*/ */
static void static void
FreePageManagerUpdateLargest(FreePageManager *fpm) FreePageManagerUpdateLargest(FreePageManager *fpm)
@ -1704,7 +1704,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
* The act of allocating pages for use in constructing our btree * The act of allocating pages for use in constructing our btree
* should never cause any page to become more full, so the new * should never cause any page to become more full, so the new
* split depth should be no greater than the old one, and perhaps * split depth should be no greater than the old one, and perhaps
* less if we fortutiously allocated a chunk that freed up a slot * less if we fortuitously allocated a chunk that freed up a slot
* on the page we need to update. * on the page we need to update.
*/ */
Assert(result.split_pages <= fpm->btree_recycle_count); Assert(result.split_pages <= fpm->btree_recycle_count);

View File

@ -1625,7 +1625,7 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
} }
/* /*
* check whether the transaciont id 'xid' is in the pre-sorted array 'xip'. * check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
*/ */
static bool static bool
TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num) TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)

View File

@ -198,7 +198,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
* *
* Optional. * Optional.
* *
* Set up extrac format-related TOC data. * Set up extract format-related TOC data.
*/ */
static void static void
_ArchiveEntry(ArchiveHandle *AH, TocEntry *te) _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)

View File

@ -3500,7 +3500,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
resetPQExpBuffer(query); resetPQExpBuffer(query);
/* Get the publication memebership for the table. */ /* Get the publication membership for the table. */
appendPQExpBuffer(query, appendPQExpBuffer(query,
"SELECT pr.tableoid, pr.oid, p.pubname " "SELECT pr.tableoid, pr.oid, p.pubname "
"FROM pg_catalog.pg_publication_rel pr," "FROM pg_catalog.pg_publication_rel pr,"

View File

@ -828,7 +828,7 @@ StoreQueryTuple(const PGresult *result)
char *varname; char *varname;
char *value; char *value;
/* concate prefix and column name */ /* concatenate prefix and column name */
varname = psprintf("%s%s", pset.gset_prefix, colname); varname = psprintf("%s%s", pset.gset_prefix, colname);
if (!PQgetisnull(result, 0, i)) if (!PQgetisnull(result, 0, i))

View File

@ -2127,7 +2127,7 @@ describeOneTableDetails(const char *schemaname,
printTableAddFooter(&cont, _("Check constraints:")); printTableAddFooter(&cont, _("Check constraints:"));
for (i = 0; i < tuples; i++) for (i = 0; i < tuples; i++)
{ {
/* untranslated contraint name and def */ /* untranslated constraint name and def */
printfPQExpBuffer(&buf, " \"%s\" %s", printfPQExpBuffer(&buf, " \"%s\" %s",
PQgetvalue(result, i, 0), PQgetvalue(result, i, 0),
PQgetvalue(result, i, 1)); PQgetvalue(result, i, 1));
@ -3197,7 +3197,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
if (verbose) if (verbose)
{ {
/* /*
* As of PostgreSQL 9.0, use pg_table_size() to show a more acurate * As of PostgreSQL 9.0, use pg_table_size() to show a more accurate
* size of a table, including FSM, VM and TOAST tables. * size of a table, including FSM, VM and TOAST tables.
*/ */
if (pset.sversion >= 90000) if (pset.sversion >= 90000)
@ -5108,7 +5108,7 @@ describeSubscriptions(const char *pattern, bool verbose)
gettext_noop("Conninfo")); gettext_noop("Conninfo"));
} }
/* Only display subscritpions in current database. */ /* Only display subscriptions in current database. */
appendPQExpBufferStr(&buf, appendPQExpBufferStr(&buf,
"FROM pg_catalog.pg_subscription\n" "FROM pg_catalog.pg_subscription\n"
"WHERE subdbid = (SELECT oid\n" "WHERE subdbid = (SELECT oid\n"

View File

@ -26,7 +26,7 @@
#define VISIBILITYMAP_ALL_VISIBLE 0x01 #define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN 0x02 #define VISIBILITYMAP_ALL_FROZEN 0x02
#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid #define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid
* visiblitymap flags bits */ * visibilitymap flags bits */
/* Macros for visibilitymap test */ /* Macros for visibilitymap test */
#define VM_ALL_VISIBLE(r, b, v) \ #define VM_ALL_VISIBLE(r, b, v) \

View File

@ -65,7 +65,7 @@ typedef enum
* apply */ * apply */
} SyncCommitLevel; } SyncCommitLevel;
/* Define the default setting for synchonous_commit */ /* Define the default setting for synchronous_commit */
#define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH #define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH
/* Synchronous commit level */ /* Synchronous commit level */

View File

@ -989,7 +989,7 @@ typedef NameData *Name;
/* gettext domain name mangling */ /* gettext domain name mangling */
/* /*
* To better support parallel installations of major PostgeSQL * To better support parallel installations of major PostgreSQL
* versions as well as parallel installations of major library soname * versions as well as parallel installations of major library soname
* versions, we mangle the gettext domain name by appending those * versions, we mangle the gettext domain name by appending those
* version numbers. The coding rule ought to be that wherever the * version numbers. The coding rule ought to be that wherever the

View File

@ -41,7 +41,7 @@ typedef struct PartitionDescData *PartitionDesc;
/*----------------------- /*-----------------------
* PartitionDispatch - information about one partitioned table in a partition * PartitionDispatch - information about one partitioned table in a partition
* hiearchy required to route a tuple to one of its partitions * hierarchy required to route a tuple to one of its partitions
* *
* reldesc Relation descriptor of the table * reldesc Relation descriptor of the table
* key Partition key information of the table * key Partition key information of the table

View File

@ -23,7 +23,7 @@
#define SubscriptionRelation_Rowtype_Id 6101 #define SubscriptionRelation_Rowtype_Id 6101
/* /*
* Technicaly, the subscriptions live inside the database, so a shared catalog * Technically, the subscriptions live inside the database, so a shared catalog
* seems weird, but the replication launcher process needs to access all of * seems weird, but the replication launcher process needs to access all of
* them to be able to start the workers, so we have to put them in a shared, * them to be able to start the workers, so we have to put them in a shared,
* nailed catalog. * nailed catalog.
@ -35,7 +35,7 @@ CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHE
Oid subowner; /* Owner of the subscription */ Oid subowner; /* Owner of the subscription */
bool subenabled; /* True if the subsription is enabled bool subenabled; /* True if the subscription is enabled
* (the worker should be running) */ * (the worker should be running) */
#ifdef CATALOG_VARLEN /* variable-length fields start here */ #ifdef CATALOG_VARLEN /* variable-length fields start here */
@ -65,7 +65,7 @@ typedef FormData_pg_subscription *Form_pg_subscription;
typedef struct Subscription typedef struct Subscription
{ {
Oid oid; /* Oid of the subscription */ Oid oid; /* Oid of the subscription */
Oid dbid; /* Oid of the database which dubscription is in */ Oid dbid; /* Oid of the database which subscription is in */
char *name; /* Name of the subscription */ char *name; /* Name of the subscription */
Oid owner; /* Oid of the subscription owner */ Oid owner; /* Oid of the subscription owner */
bool enabled; /* Indicates if the subscription is enabled */ bool enabled; /* Indicates if the subscription is enabled */

View File

@ -345,7 +345,7 @@ SH_GROW(SH_TYPE *tb, uint32 newsize)
* we need. We neither want tb->members increased, nor do we need to do * we need. We neither want tb->members increased, nor do we need to do
* deal with deleted elements, nor do we need to compare keys. So a * deal with deleted elements, nor do we need to compare keys. So a
 * special-cased implementation is a lot faster. As resizing can be time * special-cased implementation is a lot faster. As resizing can be time
* consuming and frequent, that's worthwile to optimize. * consuming and frequent, that's worthwhile to optimize.
* *
* To be able to simply move entries over, we have to start not at the * To be able to simply move entries over, we have to start not at the
* first bucket (i.e olddata[0]), but find the first bucket that's either * first bucket (i.e olddata[0]), but find the first bucket that's either
@ -620,7 +620,7 @@ SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key)
/* /*
* Backward shift following elements till either an empty element * Backward shift following elements till either an empty element
* or an element at its optimal position is encounterered. * or an element at its optimal position is encountered.
* *
* While that sounds expensive, the average chain length is short, * While that sounds expensive, the average chain length is short,
 * and deletions would otherwise require tombstones. * and deletions would otherwise require tombstones.

View File

@ -842,7 +842,7 @@ typedef LONG slock_t;
#define SPIN_DELAY() spin_delay() #define SPIN_DELAY() spin_delay()
/* If using Visual C++ on Win64, inline assembly is unavailable. /* If using Visual C++ on Win64, inline assembly is unavailable.
* Use a _mm_pause instrinsic instead of rep nop. * Use a _mm_pause intrinsic instead of rep nop.
*/ */
#if defined(_WIN64) #if defined(_WIN64)
static __forceinline void static __forceinline void

View File

@ -147,7 +147,7 @@ typedef struct
} CMPDAffix; } CMPDAffix;
/* /*
* Type of encoding affix flags in Hunspel dictionaries * Type of encoding affix flags in Hunspell dictionaries
*/ */
typedef enum typedef enum
{ {

View File

@ -2,7 +2,7 @@
/* /*
* The aim is to get a simpler interface to the database routines. * The aim is to get a simpler interface to the database routines.
* All the tidieous messing around with tuples is supposed to be hidden * All the tedious messing around with tuples is supposed to be hidden
* by this function. * by this function.
*/ */
/* Author: Linus Tolke /* Author: Linus Tolke

View File

@ -324,7 +324,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf)
* *
* function works as follows: * function works as follows:
* - first we analyze the parameters * - first we analyze the parameters
* - if this is a special case with no delimiters, add delimters * - if this is a special case with no delimiters, add delimiters
* - find the tokens. First we look for numerical values. If we have found * - find the tokens. First we look for numerical values. If we have found
* less than 3 tokens, we check for the months' names and thereafter for * less than 3 tokens, we check for the months' names and thereafter for
* the abbreviations of the months' names. * the abbreviations of the months' names.

View File

@ -1368,11 +1368,11 @@ PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
{ {
/* use cmp_abs function to calculate the result */ /* use cmp_abs function to calculate the result */
/* both are positive: normal comparation with cmp_abs */ /* both are positive: normal comparison with cmp_abs */
if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS) if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS)
return cmp_abs(var1, var2); return cmp_abs(var1, var2);
/* both are negative: return the inverse of the normal comparation */ /* both are negative: return the inverse of the normal comparison */
if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG) if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
{ {
/* /*

Some files were not shown because too many files have changed in this diff Show More