From 7863ee4def653f2c2193cb0b0cf4a8f0f3ca6c56 Mon Sep 17 00:00:00 2001
From: Michael Paquier
Date: Mon, 9 May 2022 08:39:59 +0900
Subject: [PATCH] Fix control file update done in restartpoints still running
 after promotion

If a cluster is promoted (i.e. the control file shows a state different
from DB_IN_ARCHIVE_RECOVERY) while CreateRestartPoint() is still
running, this function could miss the update of the control file for
"checkPoint" and "checkPointCopy" but still do the recycling and/or
removal of past WAL segments, assuming that the to-be-updated LSN
values should be used as reference points for the cleanup.  This causes
a follow-up restart attempting crash recovery to fail with a PANIC on a
missing checkpoint record, if the end-of-recovery checkpoint triggered
by the promotion did not complete because the cluster stopped abruptly
or crashed first.  The PANIC happens because the redo LSN referred to
in the control file is located in a segment already gone, recycled by
the previous restartpoint while "checkPoint" was left out-of-sync in
the control file.

This commit fixes the update of the control file during restartpoints
so that "checkPoint" and "checkPointCopy" are updated even if the
cluster has been promoted while a restartpoint is running, keeping them
on par with the set of WAL segments actually recycled at the end of
CreateRestartPoint().

This problem exists in all the stable branches.  However, commit
7ff23c6, by removing the last call of CreateCheckPoint() from the
startup process, has made this bug much easier to reason about, as
concurrent checkpoints are no longer possible.  No backpatch is done
yet, mostly out of caution with a point release close by: the case of
concurrent checkpoints at promotion, possible in ~v14 when the bgwriter
is not considered running by the startup process, needs more thought.
So this change is done only on HEAD for the moment.

Reported-by: Fujii Masao, Rui Zhao
Author: Kyotaro Horiguchi
Reviewed-by: Nathan Bossart, Michael Paquier
Discussion: https://postgr.es/m/20220316.102444.2193181487576617583.horikyota.ntt@gmail.com
---
 src/backend/access/transam/xlog.c | 54 +++++++++++++++++++------------
 1 file changed, 33 insertions(+), 21 deletions(-)

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 61cda56c6f..36852f2327 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -6921,6 +6921,9 @@ CreateRestartPoint(int flags)
 	XLogSegNo	_logSegNo;
 	TimestampTz xtime;
 
+	/* Concurrent checkpoint/restartpoint cannot happen */
+	Assert(!IsUnderPostmaster || MyBackendType == B_CHECKPOINTER);
+
 	/* Get a local copy of the last safe checkpoint record. */
 	SpinLockAcquire(&XLogCtl->info_lck);
 	lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
@@ -7014,40 +7017,49 @@ CreateRestartPoint(int flags)
 	PriorRedoPtr = ControlFile->checkPointCopy.redo;
 
 	/*
-	 * Update pg_control, using current time.  Check that it still shows
-	 * DB_IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
-	 * this is a quick hack to make sure nothing really bad happens if somehow
-	 * we get here after the end-of-recovery checkpoint.
+	 * Update pg_control, using current time.  Check that it still shows an
+	 * older checkpoint, else do nothing; this is a quick hack to make sure
+	 * nothing really bad happens if somehow we get here after the
+	 * end-of-recovery checkpoint.
 	 */
 	LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
-	if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY &&
-		ControlFile->checkPointCopy.redo < lastCheckPoint.redo)
+	if (ControlFile->checkPointCopy.redo < lastCheckPoint.redo)
 	{
+		/*
+		 * Update the checkpoint information.  We do this even if the cluster
+		 * does not show DB_IN_ARCHIVE_RECOVERY to match with the set of WAL
+		 * segments recycled below.
+		 */
 		ControlFile->checkPoint = lastCheckPointRecPtr;
 		ControlFile->checkPointCopy = lastCheckPoint;
 
 		/*
-		 * Ensure minRecoveryPoint is past the checkpoint record.  Normally,
+		 * Ensure minRecoveryPoint is past the checkpoint record and update it
+		 * if the control file still shows DB_IN_ARCHIVE_RECOVERY.  Normally,
 		 * this will have happened already while writing out dirty buffers,
 		 * but not necessarily - e.g. because no buffers were dirtied.  We do
-		 * this because a backup performed in recovery uses minRecoveryPoint to
-		 * determine which WAL files must be included in the backup, and the
-		 * file (or files) containing the checkpoint record must be included,
-		 * at a minimum. Note that for an ordinary restart of recovery there's
-		 * no value in having the minimum recovery point any earlier than this
-		 * anyway, because redo will begin just after the checkpoint record.
+		 * this because a backup performed in recovery uses minRecoveryPoint
+		 * to determine which WAL files must be included in the backup, and
+		 * the file (or files) containing the checkpoint record must be
+		 * included, at a minimum.  Note that for an ordinary restart of
+		 * recovery there's no value in having the minimum recovery point any
+		 * earlier than this anyway, because redo will begin just after the
+		 * checkpoint record.
 		 */
-		if (ControlFile->minRecoveryPoint < lastCheckPointEndPtr)
+		if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY)
 		{
-			ControlFile->minRecoveryPoint = lastCheckPointEndPtr;
-			ControlFile->minRecoveryPointTLI = lastCheckPoint.ThisTimeLineID;
+			if (ControlFile->minRecoveryPoint < lastCheckPointEndPtr)
+			{
+				ControlFile->minRecoveryPoint = lastCheckPointEndPtr;
+				ControlFile->minRecoveryPointTLI = lastCheckPoint.ThisTimeLineID;
 
-			/* update local copy */
-			LocalMinRecoveryPoint = ControlFile->minRecoveryPoint;
-			LocalMinRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
+				/* update local copy */
+				LocalMinRecoveryPoint = ControlFile->minRecoveryPoint;
+				LocalMinRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
+			}
+			if (flags & CHECKPOINT_IS_SHUTDOWN)
+				ControlFile->state = DB_SHUTDOWNED_IN_RECOVERY;
 		}
-		if (flags & CHECKPOINT_IS_SHUTDOWN)
-			ControlFile->state = DB_SHUTDOWNED_IN_RECOVERY;
 		UpdateControlFile();
 	}
 	LWLockRelease(ControlFileLock);
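
As a companion to the patch, here is a minimal standalone C sketch of the race it fixes.  This is not PostgreSQL code: every identifier in it is invented for illustration, and it compresses CreateRestartPoint()'s control file update and WAL recycling into a few lines, just to show why skipping the update after promotion while still recycling segments leaves the control file pointing at a removed segment.

/*
 * Toy model of the restartpoint/promotion race -- NOT PostgreSQL code.
 * Compile with: cc -o restartpoint_race restartpoint_race.c
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

typedef enum { IN_ARCHIVE_RECOVERY, IN_PRODUCTION } DbState;

typedef struct
{
	DbState		state;
	int			checkPointSeg;	/* segment holding the checkpoint record */
} ControlData;

static ControlData controlFile = {IN_ARCHIVE_RECOVERY, 5};
static int	oldestKeptSeg = 5;	/* segments below this have been recycled */

/* A restartpoint whose checkpoint record lives in segment "restartSeg". */
static void
restart_point(int restartSeg, bool fixed)
{
	/*
	 * Pre-fix behavior: skip the control file update once promotion has
	 * flipped the state away from IN_ARCHIVE_RECOVERY.  Fixed behavior:
	 * update the checkpoint location regardless of the state.
	 */
	if (fixed || controlFile.state == IN_ARCHIVE_RECOVERY)
		controlFile.checkPointSeg = restartSeg;

	/* Recycling always happened, based on the restartpoint's position. */
	oldestKeptSeg = restartSeg;
}

int
main(void)
{
	bool		fixed = getenv("FIXED") != NULL;

	/* Promotion happens while the restartpoint is running... */
	controlFile.state = IN_PRODUCTION;
	/* ...and the restartpoint then completes up to segment 8. */
	restart_point(8, fixed);

	/* Crash before the end-of-recovery checkpoint; restart reads the file. */
	if (controlFile.checkPointSeg < oldestKeptSeg)
	{
		printf("PANIC: checkpoint record in segment %d, but segments below %d are gone\n",
			   controlFile.checkPointSeg, oldestKeptSeg);
		return 1;
	}
	printf("crash recovery can start from segment %d\n",
		   controlFile.checkPointSeg);
	return 0;
}

Run as-is, the model takes the pre-fix path and reports the PANIC, matching the report in the thread above; run with FIXED=1 in the environment, it takes the patched path, where "checkPoint" and the recycled segments stay in sync.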