From dc88460c24ed71ba7464ef4749e5f25da1bf6652 Mon Sep 17 00:00:00 2001
From: Thomas Munro
Date: Sat, 10 Apr 2021 08:09:30 +1200
Subject: [PATCH] Doc: Review for "Optionally prefetch referenced data in
 recovery."

Typos, corrections and language improvements in the docs, and a few in
code comments too.

Reported-by: Justin Pryzby
Discussion: https://postgr.es/m/20210409033703.GP6592%40telsasoft.com
---
 doc/src/sgml/config.sgml                  |  2 +-
 doc/src/sgml/wal.sgml                     |  4 +---
 src/backend/access/transam/xlogprefetch.c | 12 +++++++-----
 src/backend/utils/misc/guc.c              |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 3a062a145c..cc18b0bbf0 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -3621,7 +3621,7 @@ include_dir 'conf.d'
         pool after that.  However, on file systems with a block size larger
         than PostgreSQL's, prefetching can avoid a
-        costly read-before-write when a blocks are later written.
+        costly read-before-write when blocks are later written.
         The default is off.
diff --git a/doc/src/sgml/wal.sgml b/doc/src/sgml/wal.sgml
index 24cf567ee2..36e00c92c2 100644
--- a/doc/src/sgml/wal.sgml
+++ b/doc/src/sgml/wal.sgml
@@ -816,9 +816,7 @@
    prefetching mechanism is most likely to be effective on systems with
    full_page_writes set to off (where that is safe), and where the working
-   set is larger than RAM.  By default, prefetching in recovery is enabled
-   on operating systems that have posix_fadvise
-   support.
+   set is larger than RAM.  By default, prefetching in recovery is disabled.
diff --git a/src/backend/access/transam/xlogprefetch.c b/src/backend/access/transam/xlogprefetch.c
index 28764326bc..2178c9086e 100644
--- a/src/backend/access/transam/xlogprefetch.c
+++ b/src/backend/access/transam/xlogprefetch.c
@@ -31,12 +31,14 @@
  * stall; this is counted with "skip_fpw".
  *
  * The only way we currently have to know that an I/O initiated with
- * PrefetchSharedBuffer() has that recovery will eventually call ReadBuffer(),
- * and perform a synchronous read.  Therefore, we track the number of
+ * PrefetchSharedBuffer() has completed is to wait for the corresponding call
+ * to XLogReadBufferInRedo() to return.  Therefore, we track the number of
  * potentially in-flight I/Os by using a circular buffer of LSNs.  When it's
- * full, we have to wait for recovery to replay records so that the queue
- * depth can be reduced, before we can do any more prefetching.  Ideally, this
- * keeps us the right distance ahead to respect maintenance_io_concurrency.
+ * full, we have to wait for recovery to replay enough records to remove some
+ * LSNs, and only then can we initiate more prefetching.  Ideally, this keeps
+ * us just the right distance ahead to respect maintenance_io_concurrency,
+ * though in practice it errs on the side of being too conservative because
+ * many I/Os complete sooner than we know.
  *
  *-------------------------------------------------------------------------
  */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 46f1d6406f..6dd889a7c0 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2774,7 +2774,7 @@ static struct config_int ConfigureNamesInt[] =
 	{
 		{"wal_decode_buffer_size", PGC_POSTMASTER, WAL_ARCHIVE_RECOVERY,
 			gettext_noop("Maximum buffer size for reading ahead in the WAL during recovery."),
-			gettext_noop("This controls the maximum distance we can read ahead n the WAL to prefetch referenced blocks."),
+			gettext_noop("This controls the maximum distance we can read ahead in the WAL to prefetch referenced blocks."),
			GUC_UNIT_BYTE
 		},
 		&wal_decode_buffer_size,
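
For readers unfamiliar with the mechanism the amended xlogprefetch.c comment
describes, here is a minimal, self-contained C sketch of the bookkeeping it
explains: potentially in-flight prefetch I/Os are remembered in a circular
buffer of LSNs, no new prefetches are initiated while that buffer is full,
and entries are only reclaimed as replay advances past them.  All names in
the sketch (PrefetchQueue, queue_push, PREFETCH_QUEUE_DEPTH, and so on) are
hypothetical illustrations; this is not the actual xlogprefetch.c code.

/*
 * Illustrative sketch only: a fixed-size circular buffer of LSNs used to
 * bound the number of prefetch I/Os that may still be in flight.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;		/* an LSN, as in PostgreSQL */

#define PREFETCH_QUEUE_DEPTH 4		/* stands in for maintenance_io_concurrency */

typedef struct PrefetchQueue
{
	XLogRecPtr	lsns[PREFETCH_QUEUE_DEPTH];	/* circular buffer of LSNs */
	int			head;			/* next slot to insert into */
	int			tail;			/* oldest potentially in-flight entry */
	int			count;			/* number of potentially in-flight entries */
} PrefetchQueue;

/* Can we start another prefetch, or is the queue at its depth limit? */
static bool
queue_has_room(const PrefetchQueue *q)
{
	return q->count < PREFETCH_QUEUE_DEPTH;
}

/* Record that we prefetched a block referenced by the record at 'lsn'. */
static void
queue_push(PrefetchQueue *q, XLogRecPtr lsn)
{
	q->lsns[q->head] = lsn;
	q->head = (q->head + 1) % PREFETCH_QUEUE_DEPTH;
	q->count++;
}

/*
 * Replay has reached 'replayed_lsn'; any prefetch issued for a record at or
 * before that point must have been consumed by a synchronous read during
 * redo, so its slot can be reclaimed.
 */
static void
queue_release_up_to(PrefetchQueue *q, XLogRecPtr replayed_lsn)
{
	while (q->count > 0 && q->lsns[q->tail] <= replayed_lsn)
	{
		q->tail = (q->tail + 1) % PREFETCH_QUEUE_DEPTH;
		q->count--;
	}
}

int
main(void)
{
	PrefetchQueue q = {0};

	/* Pretend to walk ahead of replay, prefetching for records 100..109. */
	for (XLogRecPtr lsn = 100; lsn < 110; lsn++)
	{
		if (!queue_has_room(&q))
		{
			/* Queue full: simulate replay catching up by one record. */
			printf("queue full, waiting for replay past LSN %llu\n",
				   (unsigned long long) q.lsns[q.tail]);
			queue_release_up_to(&q, q.lsns[q.tail]);
		}
		queue_push(&q, lsn);
		printf("prefetch issued for record at LSN %llu (in flight: %d)\n",
			   (unsigned long long) lsn, q.count);
	}
	return 0;
}

In the real mechanism the queue depth is governed by
maintenance_io_concurrency, and, as the comment notes, this accounting errs
on the conservative side: an entry counts as in flight until replay consumes
it, even if the underlying I/O completed earlier.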