Convert unloggedLSN to an atomic variable.

Currently, this variable is an XLogRecPtr protected by a spinlock.
By converting it to an atomic variable, we can remove the spinlock,
which saves a small amount of shared memory space.  Since this code
is not performance-critical, we use atomic operations with full
barrier semantics to make it easy to reason about correctness.
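
For illustration only, here is a minimal standalone sketch of the same pattern
using C11 <stdatomic.h> rather than the pg_atomic API; the counter name, the
helper function, and the initial value are hypothetical stand-ins:

    /* Lock-free counter; atomic_fetch_add defaults to seq_cst (full-barrier) ordering. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t counter;    /* stand-in for XLogCtl->unloggedLSN */

    /* Return the current value and advance it, like GetFakeLSNForUnloggedRel(). */
    static uint64_t
    next_fake_lsn(void)
    {
        return atomic_fetch_add(&counter, 1);
    }

    int
    main(void)
    {
        atomic_store(&counter, 1000);       /* cf. resetting to FirstNormalUnloggedLSN */
        printf("%llu\n", (unsigned long long) next_fake_lsn());    /* prints 1000 */
        printf("%llu\n", (unsigned long long) next_fake_lsn());    /* prints 1001 */
        return 0;
    }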

Author: John Morris
Reviewed-by: Michael Paquier, Robert Haas, Andres Freund, Stephen Frost, Bharath Rupireddy
Discussion: https://postgr.es/m/BYAPR13MB26772534335255E50318C574A0409%40BYAPR13MB2677.namprd13.prod.outlook.com
Discussion: https://postgr.es/m/MN2PR13MB2688FD8B757316CB5C54C8A2A0DDA%40MN2PR13MB2688.namprd13.prod.outlook.com
Committed by Nathan Bossart, 2024-02-29 14:34:10 -06:00
parent 3179701505
commit 963d3072af
1 changed file with 9 additions and 17 deletions

@@ -470,9 +470,8 @@ typedef struct XLogCtlData
 	XLogSegNo	lastRemovedSegNo;	/* latest removed/recycled XLOG segment */
 
-	/* Fake LSN counter, for unlogged relations. Protected by ulsn_lck. */
-	XLogRecPtr	unloggedLSN;
-	slock_t		ulsn_lck;
+	/* Fake LSN counter, for unlogged relations. */
+	pg_atomic_uint64 unloggedLSN;
 
 	/* Time and LSN of last xlog segment switch. Protected by WALWriteLock. */
 	pg_time_t	lastSegSwitchTime;
	XLogRecPtr	lastSegSwitchLSN;
@@ -4498,14 +4497,7 @@ DataChecksumsEnabled(void)
 XLogRecPtr
 GetFakeLSNForUnloggedRel(void)
 {
-	XLogRecPtr	nextUnloggedLSN;
-
-	/* increment the unloggedLSN counter, need SpinLock */
-	SpinLockAcquire(&XLogCtl->ulsn_lck);
-	nextUnloggedLSN = XLogCtl->unloggedLSN++;
-	SpinLockRelease(&XLogCtl->ulsn_lck);
-
-	return nextUnloggedLSN;
+	return pg_atomic_fetch_add_u64(&XLogCtl->unloggedLSN, 1);
 }
 
 /*
@@ -4921,7 +4913,7 @@ XLOGShmemInit(void)
 
 	SpinLockInit(&XLogCtl->Insert.insertpos_lck);
 	SpinLockInit(&XLogCtl->info_lck);
-	SpinLockInit(&XLogCtl->ulsn_lck);
+	pg_atomic_init_u64(&XLogCtl->unloggedLSN, InvalidXLogRecPtr);
 }
 
 /*
@@ -5526,9 +5518,11 @@ StartupXLOG(void)
 	 * the unlogged LSN counter can be reset too.
 	 */
 	if (ControlFile->state == DB_SHUTDOWNED)
-		XLogCtl->unloggedLSN = ControlFile->unloggedLSN;
+		pg_atomic_write_membarrier_u64(&XLogCtl->unloggedLSN,
+									   ControlFile->unloggedLSN);
 	else
-		XLogCtl->unloggedLSN = FirstNormalUnloggedLSN;
+		pg_atomic_write_membarrier_u64(&XLogCtl->unloggedLSN,
+									   FirstNormalUnloggedLSN);
 
 	/*
 	 * Copy any missing timeline history files between 'now' and the recovery
@@ -7110,9 +7104,7 @@ CreateCheckPoint(int flags)
 	 * unused on non-shutdown checkpoints, but seems useful to store it always
 	 * for debugging purposes.
 	 */
-	SpinLockAcquire(&XLogCtl->ulsn_lck);
-	ControlFile->unloggedLSN = XLogCtl->unloggedLSN;
-	SpinLockRelease(&XLogCtl->ulsn_lck);
+	ControlFile->unloggedLSN = pg_atomic_read_membarrier_u64(&XLogCtl->unloggedLSN);
 
 	UpdateControlFile();
 	LWLockRelease(ControlFileLock);