Fix an assortment of typos

Author: Alexander Lakhin
Discussion: https://postgr.es/m/ae9f2fcb-4b24-5bb0-4240-efbbbd944ca1@gmail.com
Committer: David Rowley 2024-05-04 02:33:25 +12:00
parent 4a044b9497
commit a42fc1c903
25 changed files with 25 additions and 28 deletions


@@ -1086,7 +1086,7 @@ bt_entry_unique_check(BtreeCheckState *state, IndexTuple itup,
/*
* Current tuple has no posting list. If TID is visible save info about it
- * for the next comparisons in the loop in bt_page_check(). Report
+ * for the next comparisons in the loop in bt_target_page_check(). Report
* duplicate if lVis_tid is already valid.
*/
else
@@ -1953,7 +1953,7 @@ bt_target_page_check(BtreeCheckState *state)
* Note that !readonly callers must reverify that target page has not
* been concurrently deleted.
*
- * Save rightfirstdataoffset for detailed error message.
+ * Save rightfirstoffset for detailed error message.
*/
static BTScanInsert
bt_right_page_check_scankey(BtreeCheckState *state, OffsetNumber *rightfirstoffset)


@@ -331,7 +331,7 @@ make check-world PG_TEST_EXTRA='kerberos ldap ssl load_balance libpq_encryption'
<term><literal>xid_wraparound</literal></term>
<listitem>
<para>
- Runs the test suite under <filename>src/test/module/xid_wrapround</filename>.
+ Runs the test suite under <filename>src/test/modules/xid_wraparound</filename>.
Not enabled by default because it is resource intensive.
</para>
</listitem>


@@ -11,7 +11,7 @@ Code Targets:
backend Build backend and related modules
bin Build frontend binaries
contrib Build contrib modules
- pl Build procedual languages
+ pl Build procedural languages
Developer Targets:
reformat-dat-files Rewrite catalog data files into standard format


@@ -2598,7 +2598,7 @@ _brin_parallel_heapscan(BrinBuildState *state)
*
* After waiting for all workers to finish, merge the per-worker results into
* the complete index. The results from each worker are sorted by block number
- * (start of the page range). While combinig the per-worker results we merge
+ * (start of the page range). While combining the per-worker results we merge
* summaries for the same page range, and also fill-in empty summaries for
* ranges without any tuples.
*


@@ -228,7 +228,6 @@ SimpleLruAutotuneBuffers(int divisor, int max)
* name: name of SLRU. (This is user-visible, pick with care!)
* nslots: number of page slots to use.
* nlsns: number of LSN groups per page (set to zero if not relevant).
- * ctllock: LWLock to use to control access to the shared control structure.
* subdir: PGDATA-relative subdirectory that will contain the files.
* buffer_tranche_id: tranche ID to use for the SLRU's per-buffer LWLocks.
* bank_tranche_id: tranche ID to use for the bank LWLocks.


@@ -8507,7 +8507,7 @@ xlog_redo(XLogReaderState *record)
/*
* Return the extra open flags used for opening a file, depending on the
- * value of the GUCs wal_sync_method, fsync and io_direct.
+ * value of the GUCs wal_sync_method, fsync and debug_io_direct.
*/
static int
get_sync_bit(int method)


@@ -2465,7 +2465,7 @@ AddRelationNewConstraints(Relation rel,
* Check against pre-existing constraints. If we are allowed
* to merge with an existing constraint, there's no more to do
* here. (We omit the duplicate constraint from the result,
- * which is what ATAddCheckConstraint wants.)
+ * which is what ATAddCheckNNConstraint wants.)
*/
if (MergeWithExistingConstraint(rel, ccname, expr,
allow_merge, is_local,


@@ -18,7 +18,6 @@
* ExecValuesScan scans a values list.
* ExecValuesNext retrieve next tuple in sequential order.
* ExecInitValuesScan creates and initializes a valuesscan node.
- * ExecEndValuesScan releases any storage allocated.
* ExecReScanValuesScan rescans the values list
*/
#include "postgres.h"


@@ -57,7 +57,6 @@ static void RecordConstLocation(JumbleState *jstate, int location);
static void _jumbleNode(JumbleState *jstate, Node *node);
static void _jumbleA_Const(JumbleState *jstate, Node *node);
static void _jumbleList(JumbleState *jstate, Node *node);
- static void _jumbleRangeTblEntry(JumbleState *jstate, Node *node);
/*
* Given a possibly multi-statement source string, confine our attention to the


@@ -1041,8 +1041,8 @@ ValidateSlotSyncParams(int elevel)
/*
* Logical slot sync/creation requires wal_level >= logical.
*
- * Sincle altering the wal_level requires a server restart, so error out
- * in this case regardless of elevel provided by caller.
+ * Since altering the wal_level requires a server restart, so error out in
+ * this case regardless of elevel provided by caller.
*/
if (wal_level < WAL_LEVEL_LOGICAL)
ereport(ERROR,


@@ -11632,7 +11632,7 @@ get_xmltable(TableFunc *tf, deparse_context *context, bool showimplicit)
}
/*
- * get_json_nested_columns - Parse back nested JSON_TABLE columns
+ * get_json_table_nested_columns - Parse back nested JSON_TABLE columns
*/
static void
get_json_table_nested_columns(TableFunc *tf, JsonTablePlan *plan,


@@ -6284,7 +6284,7 @@ unicode_norm_form_from_string(const char *formstr)
/*
* Returns version of Unicode used by Postgres in "major.minor" format (the
* same format as the Unicode version reported by ICU). The third component
- * ("update version") never involves additions to the character repertiore and
+ * ("update version") never involves additions to the character repertoire and
* is unimportant for most purposes.
*
* See: https://unicode.org/versions/


@@ -844,7 +844,7 @@ process_directory_recursively(Oid tsoid,
* files requiring reconstruction. If such files occur outside these
* directories, we want to just copy them straight to the output
* directory. This is to protect against a user creating a file with a
- * strange name like INCREMENTAL.config and then compaining that
+ * strange name like INCREMENTAL.config and then complaining that
* incremental backups don't work properly. The test here is a bit tricky:
* incremental files occur in subdirectories of base, in pg_global itself,
* and in subdirectories of pg_tblspc only if in-place tablespaces are


@@ -204,7 +204,7 @@ filter_get_keyword(const char **line, int *size)
}
/*
- * read_quoted_pattern - read quoted possibly multi line string
+ * read_quoted_string - read quoted possibly multi line string
*
* Reads a quoted string which can span over multiple lines and returns a
* pointer to next char after ending double quotes; it will exit on errors.


@@ -1945,7 +1945,7 @@ check_old_cluster_subscription_state(void)
" ON o.roname = 'pg_' || s.oid "
"INNER JOIN pg_catalog.pg_database d "
" ON d.oid = s.subdbid "
- "WHERE o.roname iS NULL;");
+ "WHERE o.roname IS NULL;");
ntup = PQntuples(res);
for (int i = 0; i < ntup; i++)


@@ -20,7 +20,7 @@ static char *escape_quotes(const char *src);
* return it.
*
* This accepts the dbname which will be appended to the primary_conninfo.
- * The dbname will be ignored by walreciever process but slotsync worker uses
+ * The dbname will be ignored by walreceiver process but slotsync worker uses
* it to connect to the primary server.
*/
PQExpBuffer


@@ -116,7 +116,7 @@ extern List *AddRelationNewConstraints(Relation rel,
const char *queryString);
extern List *AddRelationNotNullConstraints(Relation rel,
List *constraints,
- List *additional_notnulls);
+ List *old_notnulls);
extern void RelationClearMissing(Relation rel);
extern void SetAttrMissing(Oid relid, char *attname, char *value);


@@ -44,7 +44,7 @@ typedef struct BlockRefTableWriter BlockRefTableWriter;
* report_error_fn should not return.
*/
typedef int (*io_callback_fn) (void *callback_arg, void *data, int length);
- typedef void (*report_error_fn) (void *calblack_arg, char *msg,...) pg_attribute_printf(2, 3);
+ typedef void (*report_error_fn) (void *callback_arg, char *msg,...) pg_attribute_printf(2, 3);
/*


@@ -1867,7 +1867,7 @@ typedef struct ForeignPath
*
* We provide a set of hooks here - which the provider must take care to set
* up correctly - to allow extensions to supply their own methods of scanning
- * a relation or joing relations. For example, a provider might provide GPU
+ * a relation or join relations. For example, a provider might provide GPU
* acceleration, a cache-based scan, or some other kind of logic we haven't
* dreamed up yet.
*


@@ -1641,7 +1641,7 @@ typedef struct JsonReturning
*
* The actual value is obtained by evaluating formatted_expr. raw_expr is
* only there for displaying the original user-written expression and is not
- * evaluated by ExecInterpExpr() and eval_const_exprs_mutator().
+ * evaluated by ExecInterpExpr() and eval_const_expressions_mutator().
*/
typedef struct JsonValueExpr
{


@@ -395,7 +395,7 @@ nogssuser disable disable * connect, authok
# The expected events and outcomes above assume that SSL support
# is enabled. When libpq is compiled without SSL support, all
# attempts to connect with sslmode=require or
- # sslnegotition=direct/requiredirectwould fail immediately without
+ # sslnegotiation=direct/requiredirect would fail immediately without
# even connecting to the server. Skip those, because we tested
# them earlier already.
my ($sslmodes, $sslnegotiations);
@@ -552,7 +552,7 @@ done_testing();
### Helper functions
- # Test the cube of parameters: user, gssencmode, sslmode, and sslnegotitation
+ # Test the cube of parameters: user, gssencmode, sslmode, and sslnegotiation
sub test_matrix
{
local $Test::Builder::Level = $Test::Builder::Level + 1;


@@ -143,7 +143,7 @@ psql_command(
log_exact => '2',
err_like => [qr/You are welcome/]);
- # Try to login as allowed Alice. We don't check the Mallroy login, because
+ # Try to login as allowed Alice. We don't check the Mallory login, because
# FATAL error could cause a timing-dependant panic of IPC::Run.
psql_command(
$node, 'SELECT 1;', 0, 'try regress_alice',


@@ -4,7 +4,7 @@
# Test the JSON parser performance tester. Here we are just checking that
# the performance tester can run, both with the standard parser and the
# incremental parser. An actual performance test will run with thousands
- # of iterations onstead of just one.
+ # of iterations instead of just one.
use strict;
use warnings;


@@ -115,7 +115,7 @@ my $noconndb_id = $node->safe_psql('mydb',
my $log_offset = -s $node->logfile;
# worker_spi_launch() may be able to detect that the worker has been
- # stopped, so do not rely on psql_safe().
+ # stopped, so do not rely on safe_psql().
$node->psql('postgres',
qq[SELECT worker_spi_launch(12, $noconndb_id, $myrole_id);]);
$node->wait_for_log(


@@ -384,7 +384,7 @@ _PG_init(void)
/*
* Now fill in worker-specific data, and do the actual registrations.
*
- * bgw_extra can optionally include a dabatase OID, a role OID and a set
+ * bgw_extra can optionally include a database OID, a role OID and a set
* of flags. This is left empty here to fallback to the related GUCs at
* startup (0 for the bgworker flags).
*/