Fix typos in comments and in one isolation test.

Dagfinn Ilmari Mannsåker, reviewed by Shubham Khanna. Some subtractions
by me.

Discussion: http://postgr.es/m/87le9fmi01.fsf@wibble.ilmari.org
Robert Haas 2024-01-02 11:56:02 -05:00
parent 5c430f9dc5
commit 0d9937d118
29 changed files with 45 additions and 45 deletions

@@ -127,7 +127,7 @@ typedef struct BloomMetaPageData
 	FreeBlockNumberArray notFullPage;
 } BloomMetaPageData;
-/* Magic number to distinguish bloom pages among anothers */
+/* Magic number to distinguish bloom pages from others */
 #define BLOOM_MAGICK_NUMBER (0xDBAC0DED)
 /* Number of blocks numbers fit in BloomMetaPageData */

@@ -60,7 +60,7 @@ WITH random_string AS
 -- This generates a random string of 16366 bytes. This is chosen
 -- as random so that it does not get compressed, and the decompression
 -- would work on a string with the same length as the origin, making the
--- test behavior more predictible. lpad() ensures that the generated
+-- test behavior more predictable. lpad() ensures that the generated
 -- hexadecimal value is completed by extra zero characters if random()
 -- has generated a value strictly lower than 16.
 SELECT string_agg(decode(lpad(to_hex((random()*256)::int), 2, '0'), 'hex'), '') as bytes
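Aside: the lpad() padding matters because to_hex() drops leading zeros, so a byte value below 16 would come back as a single hex digit and leave decode(..., 'hex') with an odd-length string, which it rejects. A minimal sketch of one iteration of the expression above, for illustration only (not part of the patch):

    -- to_hex(10) yields 'a'; lpad() restores the two-digit form '0a',
    -- which decode(..., 'hex') turns back into exactly one byte
    SELECT decode(lpad(to_hex((random()*256)::int), 2, '0'), 'hex') AS one_byte;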

@@ -460,7 +460,7 @@ bf_init(PX_Cipher *c, const uint8 *key, unsigned klen, const uint8 *iv)
 	/*
 	 * Test if key len is supported. BF_set_key silently cut large keys and it
-	 * could be a problem when user transfer crypted data from one server to
+	 * could be a problem when user transfer encrypted data from one server to
 	 * another.
 	 */

@@ -36,7 +36,7 @@ WITH random_string AS
 -- This generates a random string of 16366 bytes. This is chosen
 -- as random so that it does not get compressed, and the decompression
 -- would work on a string with the same length as the origin, making the
--- test behavior more predictible. lpad() ensures that the generated
+-- test behavior more predictable. lpad() ensures that the generated
 -- hexadecimal value is completed by extra zero characters if random()
 -- has generated a value strictly lower than 16.
 SELECT string_agg(decode(lpad(to_hex((random()*256)::int), 2, '0'), 'hex'), '') as bytes

@@ -4819,7 +4819,7 @@ SELECT * FROM ft2 ftupper WHERE
 925 | 5 | 00925 | Mon Jan 26 00:00:00 1970 PST | Mon Jan 26 00:00:00 1970 | 5 | 5 | foo
 (10 rows)
--- EXISTS should be propogated to the highest upper inner join
+-- EXISTS should be propagated to the highest upper inner join
 EXPLAIN (verbose, costs off)
 SELECT ft2.*, ft4.* FROM ft2 INNER JOIN
 	(SELECT * FROM ft4 WHERE EXISTS (

@@ -1399,7 +1399,7 @@ SELECT * FROM ft2 ftupper WHERE
 AND ftupper.c1 > 900
 ORDER BY ftupper.c1 LIMIT 10;
--- EXISTS should be propogated to the highest upper inner join
+-- EXISTS should be propagated to the highest upper inner join
 EXPLAIN (verbose, costs off)
 SELECT ft2.*, ft4.* FROM ft2 INNER JOIN
 	(SELECT * FROM ft4 WHERE EXISTS (

@@ -348,7 +348,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
 	bool		autosummarize = BrinGetAutoSummarize(idxRel);
 	/*
-	 * If firt time through in this statement, initialize the insert state
+	 * If first time through in this statement, initialize the insert state
 	 * that we keep for all the inserts in the command.
 	 */
 	if (!bistate)
@@ -1042,7 +1042,7 @@ brinbuildCallbackParallel(Relation index,
 	/*
 	 * If we're in a block that belongs to a different range, summarize what
 	 * we've got and start afresh. Note the scan might have skipped many
-	 * pages, if they were devoid of live tuples; we do not create emptry BRIN
+	 * pages, if they were devoid of live tuples; we do not create empty BRIN
 	 * ranges here - the leader is responsible for filling them in.
 	 *
 	 * Unlike serial builds, parallel index builds allow synchronized seqscans
@@ -2149,7 +2149,7 @@ union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b)
 * brin_vacuum_scan
 *		Do a complete scan of the index during VACUUM.
 *
-* This routine scans the complete index looking for uncatalogued index pages,
+* This routine scans the complete index looking for uncataloged index pages,
 * i.e. those that might have been lost due to a crash after index extension
 * and such.
 */

@@ -85,7 +85,7 @@
 	((att)->attstorage != TYPSTORAGE_PLAIN)
 /*
- * Setup for cacheing pass-by-ref missing attributes in a way that survives
+ * Setup for caching pass-by-ref missing attributes in a way that survives
 * tupleDesc destruction.
 */

@@ -158,7 +158,7 @@ btbuildempty(Relation index)
 	Page		metapage;
 	/*
-	 * Initalize the metapage.
+	 * Initialize the metapage.
 	 *
 	 * Regular index build bypasses the buffer manager and uses smgr functions
 	 * directly, with an smgrimmedsync() call at the end. That makes sense

@@ -4218,7 +4218,7 @@ cachedNamespacePath(const char *searchPath, Oid roleid)
 	entry = spcache_insert(searchPath, roleid);
 	/*
-	 * An OOM may have resulted in a cache entry with mising 'oidlist' or
+	 * An OOM may have resulted in a cache entry with missing 'oidlist' or
 	 * 'finalPath', so just compute whatever is missing.
 	 */

@@ -1290,7 +1290,7 @@ get_relation_constraint_attnos(Oid relid, const char *conname,
 /*
 * Return the OID of the constraint enforced by the given index in the
-* given relation; or InvalidOid if no such index is catalogued.
+* given relation; or InvalidOid if no such index is cataloged.
 *
 * Much like get_constraint_index, this function is concerned only with the
 * one constraint that "owns" the given index. Therefore, constraints of

@@ -387,7 +387,7 @@ SetDatatabaseHasLoginEventTriggers(void)
 	HeapTuple	tuple;
 	/*
-	 * Use shared lock to prevent a conflit with EventTriggerOnLogin() trying
+	 * Use shared lock to prevent a conflict with EventTriggerOnLogin() trying
 	 * to reset pg_database.dathasloginevt flag. Note, this lock doesn't
 	 * effectively blocks database or other objection. It's just custom lock
 	 * tag used to prevent multiple backends changing

@@ -1849,7 +1849,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 	econtext->ecxt_scantuple = slot;
 	/*
-	 * As in case of the catalogued constraints, we treat a NULL result as
+	 * As in case of the cataloged constraints, we treat a NULL result as
 	 * success here, not a failure.
 	 */
 	success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);

@@ -1928,8 +1928,8 @@ deconstruct_distribute_oj_quals(PlannerInfo *root,
 	 * jtitems list to be ordered that way.
 	 *
 	 * We first strip out all the nullingrels bits corresponding to
-	 * commutating joins below this one, and then successively put them
-	 * back as we crawl up the join stack.
+	 * commuting joins below this one, and then successively put them back
+	 * as we crawl up the join stack.
 	 */
 	quals = jtitem->oj_joinclauses;
 	if (!bms_is_empty(joins_below))

@@ -2608,7 +2608,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, const RangeType *r, Datum
 * values into a range object. They are modeled after heaptuple.c's
 * heap_compute_data_size() and heap_fill_tuple(), but we need not handle
 * null values here. TYPE_IS_PACKABLE must test the same conditions as
-* heaptuple.c's ATT_IS_PACKABLE macro. See the comments thare for more
+* heaptuple.c's ATT_IS_PACKABLE macro. See the comments there for more
 * details.
 */

@@ -93,7 +93,7 @@ static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
 static void freestate_cluster(Tuplesortstate *state);
 /*
- * Data struture pointed by "TuplesortPublic.arg" for the CLUSTER case. Set by
+ * Data structure pointed by "TuplesortPublic.arg" for the CLUSTER case. Set by
 * the tuplesort_begin_cluster.
 */
 typedef struct
@@ -105,7 +105,7 @@ typedef struct
 } TuplesortClusterArg;
 /*
- * Data struture pointed by "TuplesortPublic.arg" for the IndexTuple case.
+ * Data structure pointed by "TuplesortPublic.arg" for the IndexTuple case.
 * Set by tuplesort_begin_index_xxx and used only by the IndexTuple routines.
 */
 typedef struct
@@ -115,7 +115,7 @@ typedef struct
 } TuplesortIndexArg;
 /*
- * Data struture pointed by "TuplesortPublic.arg" for the index_btree subcase.
+ * Data structure pointed by "TuplesortPublic.arg" for the index_btree subcase.
 */
 typedef struct
 {
@@ -126,7 +126,7 @@ typedef struct
 } TuplesortIndexBTreeArg;
 /*
- * Data struture pointed by "TuplesortPublic.arg" for the index_hash subcase.
+ * Data structure pointed by "TuplesortPublic.arg" for the index_hash subcase.
 */
 typedef struct
 {
@@ -138,7 +138,7 @@ typedef struct
 } TuplesortIndexHashArg;
 /*
- * Data struture pointed by "TuplesortPublic.arg" for the Datum case.
+ * Data structure pointed by "TuplesortPublic.arg" for the Datum case.
 * Set by tuplesort_begin_datum and used only by the DatumTuple routines.
 */
 typedef struct

@@ -4,7 +4,7 @@
 * Combo command ID support routines
 *
 * Before version 8.3, HeapTupleHeaderData had separate fields for cmin
-* and cmax. To reduce the header size, cmin and cmax are now overlayed
+* and cmax. To reduce the header size, cmin and cmax are now overlaid
 * in the same field in the header. That usually works because you rarely
 * insert and delete a tuple in the same transaction, and we don't need
 * either field to remain valid after the originating transaction exits.

@@ -60,7 +60,7 @@ sub run_test
 	# Insert a row in the old primary. This causes the primary and standby
 	# to have "diverged", it's no longer possible to just apply the
-	# standy's logs over primary directory - you need to rewind.
+	# standby's logs over primary directory - you need to rewind.
 	primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
 	# Also insert a new row in the standby, which won't be present in the

@@ -52,7 +52,7 @@ sub run_test
 	# Insert a row in the old primary. This causes the primary and standby
 	# to have "diverged", it's no longer possible to just apply the
-	# standy's logs over primary directory - you need to rewind.
+	# standby's logs over primary directory - you need to rewind.
 	primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
 	# Also insert a new row in the standby, which won't be present in the

@@ -86,7 +86,7 @@ $node_c->promote;
 # Insert a row in A. This causes A/B and C to have "diverged", so that it's
-# no longer possible to just apply the standy's logs over primary directory
+# no longer possible to just apply the standby's logs over primary directory
 # - you need to rewind.
 $node_a->safe_psql('postgres',
 	"INSERT INTO tbl1 VALUES ('in A, after C was promoted')");

@@ -28,7 +28,7 @@ primary_psql('CHECKPOINT');
 RewindTest::promote_standby();
 # Insert a row in the old primary. This causes the primary and standby to have
-# "diverged", it's no longer possible to just apply the standy's logs over
+# "diverged", it's no longer possible to just apply the standby's logs over
 # primary directory - you need to rewind. Also insert a new row in the
 # standby, which won't be present in the old primary.
 primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");

@@ -337,7 +337,7 @@
 /*
 * Define this to force Bitmapset reallocation on each modification. Helps
-* to find hangling pointers to Bitmapset's.
+* to find dangling pointers to Bitmapset's.
 */
 /* #define REALLOCATE_BITMAPSETS */

@@ -543,10 +543,10 @@ permutation
   s1_table_insert
   s1_begin
   s1_table_update_k1 # should *not* be counted, different rel
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_table_truncate
   s1_table_insert_k1 # should be counted
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_prepare_a
   s1_commit_prepared_a
   s1_ff
@@ -557,10 +557,10 @@ permutation
   s1_table_insert
   s1_begin
   s1_table_update_k1 # should *not* be counted, different rel
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_table_truncate
   s1_table_insert_k1 # should be counted
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_prepare_a
   s1_ff # flush out non-transactional stats, might happen anyway
   s2_commit_prepared_a
@@ -572,10 +572,10 @@ permutation
   s1_table_insert
   s1_begin
   s1_table_update_k1 # should be counted
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_table_truncate
   s1_table_insert_k1 # should *not* be counted, different rel
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_prepare_a
   s1_rollback_prepared_a
   s1_ff
@@ -586,10 +586,10 @@ permutation
   s1_table_insert
   s1_begin
   s1_table_update_k1 # should be counted
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_table_truncate
   s1_table_insert_k1 # should *not* be counted, different rel
-  s1_table_update_k1 # dito
+  s1_table_update_k1 # ditto
   s1_prepare_a
   s2_rollback_prepared_a
   s1_ff s2_ff

@@ -486,7 +486,7 @@ FROM booltbl3 ORDER BY o;
 -- Test to make sure short-circuiting and NULL handling is
 -- correct. Use a table as source to prevent constant simplification
--- to interfer.
+-- from interfering.
 CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
 INSERT INTO booltbl4 VALUES (false, true, null);
 \pset null '(null)'
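Aside: what this test guards is SQL's three-valued logic under short-circuit evaluation; sourcing the operands from a table keeps the planner from folding the expressions away at plan time. A hedged sketch of the behavior being exercised (illustrative queries, not the test's own):

    -- FALSE AND NULL is false and TRUE OR NULL is true (the NULL cannot
    -- change the outcome), while TRUE AND NULL and FALSE OR NULL are NULL
    SELECT isfalse AND isnul AS f, istrue OR isnul AS t,
           istrue AND isnul AS n1, isfalse OR isnul AS n2
    FROM booltbl4;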

@@ -826,11 +826,11 @@ RESET enable_seqscan;
 -- test overflows during CREATE INDEX with extreme timestamp values
 CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ);
 SET datestyle TO iso;
--- values close to timetamp minimum
+-- values close to timestamp minimum
 INSERT INTO brin_timestamp_test
 SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval
 FROM generate_series(1,30) s(i);
--- values close to timetamp maximum
+-- values close to timestamp maximum
 INSERT INTO brin_timestamp_test
 SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval
 FROM generate_series(1,30) s(i);

@@ -6957,7 +6957,7 @@ WHERE q0.a = 1;
 (7 rows)
 --
----- Only one side is unqiue
+---- Only one side is unique
 --select * from sl t1, sl t2 where t1.a = t2.a and t1.b = 1;
 --select * from sl t1, sl t2 where t1.a = t2.a and t2.b = 1;
 --

@@ -227,7 +227,7 @@ FROM booltbl3 ORDER BY o;
 -- Test to make sure short-circuiting and NULL handling is
 -- correct. Use a table as source to prevent constant simplification
--- to interfer.
+-- from interfering.
 CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
 INSERT INTO booltbl4 VALUES (false, true, null);
 \pset null '(null)'

@@ -592,12 +592,12 @@ CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ);
 SET datestyle TO iso;
--- values close to timetamp minimum
+-- values close to timestamp minimum
 INSERT INTO brin_timestamp_test
 SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval
 FROM generate_series(1,30) s(i);
--- values close to timetamp maximum
+-- values close to timestamp maximum
 INSERT INTO brin_timestamp_test
 SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval
 FROM generate_series(1,30) s(i);

@@ -2656,7 +2656,7 @@ SELECT * FROM
 WHERE q0.a = 1;
 --
----- Only one side is unqiue
+---- Only one side is unique
 --select * from sl t1, sl t2 where t1.a = t2.a and t1.b = 1;
 --select * from sl t1, sl t2 where t1.a = t2.a and t2.b = 1;
 --