From 49f2194ed5c1f6274ac807fb033880aee0b0d1c3 Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Thu, 21 Dec 2023 15:36:17 -0500
Subject: [PATCH] Fix numerous typos in incremental backup commits.

Apparently, spell check would have been a really good idea.

Alexander Lakhin, with a few additions as per an off-list report
from Andres Freund.

Discussion: http://postgr.es/m/f08f7c60-1ad3-0b57-d580-54b11f07cddf@gmail.com
---
 doc/src/sgml/ref/pg_basebackup.sgml           |  2 +-
 doc/src/sgml/ref/pg_combinebackup.sgml        |  2 +-
 src/backend/backup/basebackup_incremental.c   |  6 +++---
 src/backend/backup/walsummaryfuncs.c          |  4 ++--
 src/backend/postmaster/walsummarizer.c        | 21 +++++++++----------
 src/backend/replication/walsender.c           |  4 ++--
 src/bin/pg_combinebackup/pg_combinebackup.c   |  6 +++---
 .../pg_combinebackup/t/002_compare_backups.pl |  2 +-
 src/bin/pg_combinebackup/t/004_manifest.pl    |  2 +-
 src/bin/pg_combinebackup/write_manifest.c     |  2 +-
 src/common/blkreftable.c                      | 12 +++++------
 src/include/catalog/catversion.h              |  2 +-
 src/include/catalog/pg_proc.dat               |  2 +-
 13 files changed, 33 insertions(+), 34 deletions(-)

diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml
index 7c183a5cfd..e411ddbf45 100644
--- a/doc/src/sgml/ref/pg_basebackup.sgml
+++ b/doc/src/sgml/ref/pg_basebackup.sgml
@@ -213,7 +213,7 @@ PostgreSQL documentation
-
+
        Performs an incremental
diff --git a/doc/src/sgml/ref/pg_combinebackup.sgml b/doc/src/sgml/ref/pg_combinebackup.sgml
index e1cb31607e..8a0a600c2b 100644
--- a/doc/src/sgml/ref/pg_combinebackup.sgml
+++ b/doc/src/sgml/ref/pg_combinebackup.sgml
@@ -83,7 +83,7 @@ PostgreSQL documentation
     The / option instructs
-    pg_cominebackup to figure out what would be done
+    pg_combinebackup to figure out what would be done
     without actually creating the target directory or any output files.
     It is particularly useful in combination with .
diff --git a/src/backend/backup/basebackup_incremental.c b/src/backend/backup/basebackup_incremental.c
index 1e5a5ac33a..42bbe564e2 100644
--- a/src/backend/backup/basebackup_incremental.c
+++ b/src/backend/backup/basebackup_incremental.c
@@ -158,7 +158,7 @@ CreateIncrementalBackupInfo(MemoryContext mcxt)
 
 /*
  * Before taking an incremental backup, the caller must supply the backup
- * manifest from a prior backup. Each chunk of manifest data recieved
+ * manifest from a prior backup. Each chunk of manifest data received
  * from the client should be passed to this function.
  */
 void
@@ -462,7 +462,7 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
            ++deadcycles;
 
            /*
-            * If we've managed to wait for an entire minute withot the WAL
+            * If we've managed to wait for an entire minute without the WAL
             * summarizer absorbing a single WAL record, error out; probably
             * something is wrong.
             *
@@ -473,7 +473,7 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
            * likely to catch a reasonable number of the things that can go wrong
            * in practice (e.g. the summarizer process is completely hung, say
            * because somebody hooked up a debugger to it or something) without
-           * giving up too quickly when the sytem is just slow.
+           * giving up too quickly when the system is just slow.
            */
           if (deadcycles >= 6)
               ereport(ERROR,
diff --git a/src/backend/backup/walsummaryfuncs.c b/src/backend/backup/walsummaryfuncs.c
index a1f69ad4ba..f96491534d 100644
--- a/src/backend/backup/walsummaryfuncs.c
+++ b/src/backend/backup/walsummaryfuncs.c
@@ -92,7 +92,7 @@ pg_wal_summary_contents(PG_FUNCTION_ARGS)
                errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                errmsg("invalid timeline %lld", (long long) raw_tli));
 
-   /* Prepare to read the specified WAL summry file. */
+   /* Prepare to read the specified WAL summary file. */
    ws.tli = (TimeLineID) raw_tli;
    ws.start_lsn = PG_GETARG_LSN(1);
    ws.end_lsn = PG_GETARG_LSN(2);
@@ -143,7 +143,7 @@ pg_wal_summary_contents(PG_FUNCTION_ARGS)
    }
 
    /*
-    * If the limit block is not InvalidBlockNumber, emit an exta row
+    * If the limit block is not InvalidBlockNumber, emit an extra row
     * with that block number and limit_block = true.
     *
     * There is no point in doing this when the limit_block is
diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c
index 9fa155349e..81e5ac8283 100644
--- a/src/backend/postmaster/walsummarizer.c
+++ b/src/backend/postmaster/walsummarizer.c
@@ -87,7 +87,7 @@ typedef struct
    XLogRecPtr  pending_lsn;
 
    /*
-    * This field handles its own synchronizaton.
+    * This field handles its own synchronization.
    */
    ConditionVariable summary_file_cv;
 } WalSummarizerData;
@@ -117,7 +117,7 @@ static long sleep_quanta = 1;
 /*
  * The sleep time will always be a multiple of 200ms and will not exceed
  * thirty seconds (150 * 200 = 30 * 1000). Note that the timeout here needs
- * to be substntially less than the maximum amount of time for which an
+ * to be substantially less than the maximum amount of time for which an
  * incremental backup will wait for this process to catch up. Otherwise, an
 * incremental backup might time out on an idle system just because we sleep
 * for too long.
@@ -212,7 +212,7 @@ WalSummarizerMain(void)
    /*
    * Within this function, 'current_lsn' and 'current_tli' refer to the
    * point from which the next WAL summary file should start. 'exact' is
-   * true if 'current_lsn' is known to be the start of a WAL recod or WAL
+   * true if 'current_lsn' is known to be the start of a WAL record or WAL
    * segment, and false if it might be in the middle of a record someplace.
   *
   * 'switch_lsn' and 'switch_tli', if set, are the LSN at which we need to
@@ -297,7 +297,7 @@ WalSummarizerMain(void)
 
       /*
       * Sleep for 10 seconds before attempting to resume operations in
-      * order to avoid excessing logging.
+      * order to avoid excessive logging.
       *
       * Many of the likely error conditions are things that will repeat
       * every time. For example, if the WAL can't be read or the summary
@@ -449,7 +449,7 @@ GetOldestUnsummarizedLSN(TimeLineID *tli, bool *lsn_is_exact,
       return InvalidXLogRecPtr;
 
    /*
-   * Unless we need to reset the pending_lsn, we initally acquire the lock
+   * Unless we need to reset the pending_lsn, we initially acquire the lock
    * in shared mode and try to fetch the required information. If we acquire
    * in shared mode and find that the data structure hasn't been
    * initialized, we reacquire the lock in exclusive mode so that we can
@@ -699,7 +699,7 @@ HandleWalSummarizerInterrupts(void)
 *
 * 'start_lsn' is the point at which we should start summarizing. If this
 * value comes from the end LSN of the previous record as returned by the
- * xlograder machinery, 'exact' should be true; otherwise, 'exact' should
+ * xlogreader machinery, 'exact' should be true; otherwise, 'exact' should
 * be false, and this function will search forward for the start of a valid
 * WAL record.
 *
@@ -872,7 +872,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
           xlogreader->ReadRecPtr >= switch_lsn)
       {
           /*
-           * Woops! We've read a record that *starts* after the switch LSN,
+           * Whoops! We've read a record that *starts* after the switch LSN,
            * contrary to our goal of reading only until we hit the first
            * record that ends at or after the switch LSN. Pretend we didn't
            * read it after all by bailing out of this loop right here,
@@ -1061,7 +1061,7 @@ SummarizeSmgrRecord(XLogReaderState *xlogreader, BlockRefTable *brtab)
 }
 
 /*
- * Special handling for WAL recods with RM_XACT_ID.
+ * Special handling for WAL records with RM_XACT_ID.
 */
 static void
 SummarizeXactRecord(XLogReaderState *xlogreader, BlockRefTable *brtab)
@@ -1116,7 +1116,7 @@ SummarizeXactRecord(XLogReaderState *xlogreader, BlockRefTable *brtab)
 }
 
 /*
- * Special handling for WAL recods with RM_XLOG_ID.
+ * Special handling for WAL records with RM_XLOG_ID.
 */
 static bool
 SummarizeXlogRecord(XLogReaderState *xlogreader)
@@ -1294,8 +1294,7 @@ summarizer_wait_for_wal(void)
    * sleep time to the minimum, but we don't want a handful of extra WAL
    * records to provoke a strong reaction. We choose to reduce the sleep
    * time by 1 quantum for each page read beyond the first, which is a
-   * fairly arbitrary way of trying to be reactive without
-   * overrreacting.
+   * fairly arbitrary way of trying to be reactive without overreacting.
    */
   if (pages_read_since_last_sleep > sleep_quanta - 1)
       sleep_quanta = 1;
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index dbcda32554..d4aa9e1c96 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -706,7 +706,7 @@ UploadManifest(void)
   pq_endmessage_reuse(&buf);
   pq_flush();
 
-  /* Recieve packets from client until done. */
+  /* Receive packets from client until done. */
   while (HandleUploadManifestPacket(&buf, &offset, ib))
       ;
 
@@ -719,7 +719,7 @@ UploadManifest(void)
    *
    * We assume that MemoryContextDelete and MemoryContextSetParent won't
    * fail, and thus we shouldn't end up bailing out of here in such a way as
-   * to leave dangling pointrs.
+   * to leave dangling pointers.
    */
   if (uploaded_manifest_mcxt != NULL)
       MemoryContextDelete(uploaded_manifest_mcxt);
diff --git a/src/bin/pg_combinebackup/pg_combinebackup.c b/src/bin/pg_combinebackup/pg_combinebackup.c
index 85d3f4e5de..b6ae6f2aef 100644
--- a/src/bin/pg_combinebackup/pg_combinebackup.c
+++ b/src/bin/pg_combinebackup/pg_combinebackup.c
@@ -454,7 +454,7 @@ check_backup_label_files(int n_backups, char **backup_dirs)
        * The exact size limit that we impose here doesn't really matter --
        * most of what's supposed to be in the file is fixed size and quite
        * short. However, the length of the backup_label is limited (at least
-       * by some parts of the code) to MAXGPATH, so include that value in
+       * by some parts of the code) to MAXPGPATH, so include that value in
        * the maximum length that we tolerate.
        */
       slurp_file(fd, pathbuf, buf, 10000 + MAXPGPATH);
@@ -1192,7 +1192,7 @@ scan_for_existing_tablespaces(char *pathname, cb_options *opt)
       if (!is_absolute_path(link_target))
           pg_fatal("symbolic link \"%s\" is relative", tblspcdir);
 
-      /* Caonicalize the link target. */
+      /* Canonicalize the link target. */
       canonicalize_path(link_target);
 
       /*
@@ -1222,7 +1222,7 @@ scan_for_existing_tablespaces(char *pathname, cb_options *opt)
           * we just record the paths within the data directories.
           */
          snprintf(ts->old_dir, MAXPGPATH, "%s/%s", pg_tblspc, de->d_name);
-         snprintf(ts->new_dir, MAXPGPATH, "%s/pg_tblpc/%s", opt->output,
+         snprintf(ts->new_dir, MAXPGPATH, "%s/pg_tblspc/%s", opt->output,
                   de->d_name);
          ts->in_place = true;
      }
diff --git a/src/bin/pg_combinebackup/t/002_compare_backups.pl b/src/bin/pg_combinebackup/t/002_compare_backups.pl
index 0b80455aff..99a4f5ba2d 100644
--- a/src/bin/pg_combinebackup/t/002_compare_backups.pl
+++ b/src/bin/pg_combinebackup/t/002_compare_backups.pl
@@ -113,7 +113,7 @@ $pitr2->poll_query_until('postgres',
 # It would be much nicer if we could physically compare the data files, but
 # that doesn't really work. The contents of the page hole aren't guaranteed to
 # be identical, and there can be other discrepancies as well. To make this work
-# we'd need the equivalent of each AM's rm_mask functon written or at least
+# we'd need the equivalent of each AM's rm_mask function written or at least
 # callable from Perl, and that doesn't seem practical.
 #
 # NB: We're just using the primary's backup directory for scratch space here.
diff --git a/src/bin/pg_combinebackup/t/004_manifest.pl b/src/bin/pg_combinebackup/t/004_manifest.pl
index 37de61ac06..4f3779274f 100644
--- a/src/bin/pg_combinebackup/t/004_manifest.pl
+++ b/src/bin/pg_combinebackup/t/004_manifest.pl
@@ -69,7 +69,7 @@ my $nocsum_manifest =
   slurp_file($node->backup_dir . '/csum_none/backup_manifest');
 my $nocsum_count = (() = $nocsum_manifest =~ /Checksum-Algorithm/mig);
 is($nocsum_count, 0,
-  "Checksum_Algorithm is not mentioned in no-checksum manifest");
+  "Checksum-Algorithm is not mentioned in no-checksum manifest");
 
 # OK, that's all.
 done_testing();
diff --git a/src/bin/pg_combinebackup/write_manifest.c b/src/bin/pg_combinebackup/write_manifest.c
index 82160134d8..5cf36c2b05 100644
--- a/src/bin/pg_combinebackup/write_manifest.c
+++ b/src/bin/pg_combinebackup/write_manifest.c
@@ -272,7 +272,7 @@ flush_manifest(manifest_writer *mwriter)
 }
 
 /*
- * Encode bytes using two hexademical digits for each one.
+ * Encode bytes using two hexadecimal digits for each one.
 */
 static size_t
 hex_encode(const uint8 *src, size_t len, char *dst)
diff --git a/src/common/blkreftable.c b/src/common/blkreftable.c
index 21ee6f5968..d0ecaa05ef 100644
--- a/src/common/blkreftable.c
+++ b/src/common/blkreftable.c
@@ -100,7 +100,7 @@ typedef uint16 *BlockRefTableChunk;
 * 'chunk_size' is an array storing the allocated size of each chunk.
 *
 * 'chunk_usage' is an array storing the number of elements used in each
- * chunk. If that value is less than MAX_ENTRIES_PER_CHUNK, the corresonding
+ * chunk. If that value is less than MAX_ENTRIES_PER_CHUNK, the corresponding
 * chunk is used as an array; else the corresponding chunk is used as a bitmap.
 * When used as a bitmap, the least significant bit of the first array element
 * is the status of the lowest-numbered block covered by this chunk.
@@ -183,7 +183,7 @@ typedef struct BlockRefTableBuffer
 * table reference file from disk.
 *
 * total_chunks means the number of chunks for the RelFileLocator/ForkNumber
- * combination that is curently being read, and consumed_chunks is the number
+ * combination that is currently being read, and consumed_chunks is the number
 * of those that have been read. (We always read all the information for
 * a single chunk at one time, so we don't need to be able to represent the
 * state where a chunk has been partially read.)
@@ -567,7 +567,7 @@ WriteBlockRefTable(BlockRefTable *brtab,
 * malformed. This is not used for I/O errors, which must be handled internally
 * by read_callback.
 *
- * 'error_callback_arg' is an opaque arguent to be passed to error_callback.
+ * 'error_callback_arg' is an opaque argument to be passed to error_callback.
 */
 BlockRefTableReader *
 CreateBlockRefTableReader(io_callback_fn read_callback,
@@ -922,7 +922,7 @@ BlockRefTableEntrySetLimitBlock(BlockRefTableEntry *entry,
 
   /*
-   * Next, we need to discard any offsets within the chunk that would
-   * contain the limit_block. We must handle this differenly depending on
+   * Next, we need to discard any offsets within the chunk that would
+   * contain the limit_block. We must handle this differently depending on
    * whether the chunk that would contain limit_block is a bitmap or an
    * array of offsets.
    */
@@ -955,7 +955,7 @@ BlockRefTableEntrySetLimitBlock(BlockRefTableEntry *entry,
 }
 
 /*
- * Mark a block in a given BlkRefTableEntry as known to have been modified.
+ * Mark a block in a given BlockRefTableEntry as known to have been modified.
 */
 void
 BlockRefTableEntryMarkBlockModified(BlockRefTableEntry *entry,
@@ -1112,7 +1112,7 @@ BlockRefTableEntryMarkBlockModified(BlockRefTableEntry *entry,
 }
 
 /*
- * Release memory for a BlockRefTablEntry that was created by
+ * Release memory for a BlockRefTableEntry that was created by
 * CreateBlockRefTableEntry.
 */
 void
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index a06a8f0b23..ae1bee42a9 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -57,6 +57,6 @@
 */
 
 /* yyyymmddN */
-#define CATALOG_VERSION_NO 202312071
+#define CATALOG_VERSION_NO 202312211
 
 #endif
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 916c8ec8d0..b8b26c263d 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -12109,7 +12109,7 @@
   proargnames => '{tli,start_lsn,end_lsn}',
   prosrc => 'pg_available_wal_summaries' },
 { oid => '8437',
-  descr => 'contents of a WAL sumamry file',
+  descr => 'contents of a WAL summary file',
   proname => 'pg_wal_summary_contents', prorows => '100',
   proretset => 't', provolatile => 'v', proparallel => 's',
   prorettype => 'record', proargtypes => 'int8 pg_lsn pg_lsn',