Remove more volatile qualifiers.

Prior to commit 0709b7ee72, access to
variables within a spinlock-protected critical section had to be done
through a volatile pointer, but that should no longer be necessary.
This continues work begun in df4077cda2
and 6ba4ecbf47.

Thomas Munro and Michael Paquier
Robert Haas  2015-10-06 15:45:02 -04:00
parent b943f502b7
commit 8f6bb851bd
6 changed files with 108 additions and 182 deletions
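
For context, here is a minimal before/after sketch of the idiom this commit removes. It is illustrative only: it assumes PostgreSQL's storage/spin.h, and the shared structure and field names below are hypothetical rather than taken from the tree. Since commit 0709b7ee72 made the spinlock primitives function as compiler barriers, the volatile-qualified alias that used to keep the compiler from caching or reordering accesses within the critical section is redundant, and the shared state can be accessed through an ordinary pointer.

#include "storage/spin.h"          /* SpinLockAcquire / SpinLockRelease */

typedef struct MySharedStruct      /* hypothetical shared-memory struct */
{
    slock_t     mutex;
    int         counter;
} MySharedStruct;

extern MySharedStruct *MyShared;   /* hypothetical pointer into shared memory */

/* Old idiom: access the protected fields through a volatile alias. */
void
increment_old(void)
{
    volatile MySharedStruct *vptr = MyShared;

    SpinLockAcquire(&vptr->mutex);
    vptr->counter++;
    SpinLockRelease(&vptr->mutex);
}

/* New idiom: the acquire/release calls act as compiler barriers, so the
 * structure can be used directly and the volatile qualifier goes away. */
void
increment_new(void)
{
    SpinLockAcquire(&MyShared->mutex);
    MyShared->counter++;
    SpinLockRelease(&MyShared->mutex);
}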

src/backend/postmaster/checkpointer.c

@@ -288,13 +288,10 @@ CheckpointerMain(void)
             /* Warn any waiting backends that the checkpoint failed. */
             if (ckpt_active)
             {
-                /* use volatile pointer to prevent code rearrangement */
-                volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
-
-                SpinLockAcquire(&cps->ckpt_lck);
-                cps->ckpt_failed++;
-                cps->ckpt_done = cps->ckpt_started;
-                SpinLockRelease(&cps->ckpt_lck);
+                SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+                CheckpointerShmem->ckpt_failed++;
+                CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
+                SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
                 ckpt_active = false;
             }
@@ -428,9 +425,6 @@ CheckpointerMain(void)
         bool        ckpt_performed = false;
         bool        do_restartpoint;
 
-        /* use volatile pointer to prevent code rearrangement */
-        volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
-
         /*
          * Check if we should perform a checkpoint or a restartpoint. As a
          * side-effect, RecoveryInProgress() initializes TimeLineID if
@@ -443,11 +437,11 @@ CheckpointerMain(void)
          * checkpoint we should perform, and increase the started-counter
          * to acknowledge that we've started a new checkpoint.
          */
-        SpinLockAcquire(&cps->ckpt_lck);
-        flags |= cps->ckpt_flags;
-        cps->ckpt_flags = 0;
-        cps->ckpt_started++;
-        SpinLockRelease(&cps->ckpt_lck);
+        SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+        flags |= CheckpointerShmem->ckpt_flags;
+        CheckpointerShmem->ckpt_flags = 0;
+        CheckpointerShmem->ckpt_started++;
+        SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
         /*
          * The end-of-recovery checkpoint is a real checkpoint that's
@@ -505,9 +499,9 @@ CheckpointerMain(void)
         /*
          * Indicate checkpoint completion to any waiting backends.
          */
-        SpinLockAcquire(&cps->ckpt_lck);
-        cps->ckpt_done = cps->ckpt_started;
-        SpinLockRelease(&cps->ckpt_lck);
+        SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+        CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
+        SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
         if (ckpt_performed)
         {
@@ -957,8 +951,6 @@ CheckpointerShmemInit(void)
 void
 RequestCheckpoint(int flags)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
     int         ntries;
     int         old_failed,
                 old_started;
@@ -992,13 +984,13 @@ RequestCheckpoint(int flags)
      * a "stronger" request by another backend. The flag senses must be
      * chosen to make this work!
      */
-    SpinLockAcquire(&cps->ckpt_lck);
-
-    old_failed = cps->ckpt_failed;
-    old_started = cps->ckpt_started;
-    cps->ckpt_flags |= flags;
-
-    SpinLockRelease(&cps->ckpt_lck);
+    SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+
+    old_failed = CheckpointerShmem->ckpt_failed;
+    old_started = CheckpointerShmem->ckpt_started;
+    CheckpointerShmem->ckpt_flags |= flags;
+
+    SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
     /*
      * Send signal to request checkpoint. It's possible that the checkpointer
@@ -1046,9 +1038,9 @@ RequestCheckpoint(int flags)
         /* Wait for a new checkpoint to start. */
         for (;;)
         {
-            SpinLockAcquire(&cps->ckpt_lck);
-            new_started = cps->ckpt_started;
-            SpinLockRelease(&cps->ckpt_lck);
+            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+            new_started = CheckpointerShmem->ckpt_started;
+            SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
             if (new_started != old_started)
                 break;
@@ -1064,10 +1056,10 @@ RequestCheckpoint(int flags)
         {
             int         new_done;
 
-            SpinLockAcquire(&cps->ckpt_lck);
-            new_done = cps->ckpt_done;
-            new_failed = cps->ckpt_failed;
-            SpinLockRelease(&cps->ckpt_lck);
+            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+            new_done = CheckpointerShmem->ckpt_done;
+            new_failed = CheckpointerShmem->ckpt_failed;
+            SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
             if (new_done - new_started >= 0)
                 break;
@@ -1368,15 +1360,13 @@ UpdateSharedMemoryConfig(void)
 bool
 FirstCallSinceLastCheckpoint(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
     static int  ckpt_done = 0;
     int         new_done;
     bool        FirstCall = false;
 
-    SpinLockAcquire(&cps->ckpt_lck);
-    new_done = cps->ckpt_done;
-    SpinLockRelease(&cps->ckpt_lck);
+    SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
+    new_done = CheckpointerShmem->ckpt_done;
+    SpinLockRelease(&CheckpointerShmem->ckpt_lck);
 
     if (new_done != ckpt_done)
         FirstCall = true;

src/backend/replication/logical/logical.c

@@ -848,16 +848,13 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
         bool        updated_xmin = false;
         bool        updated_restart = false;
 
-        /* use volatile pointer to prevent code rearrangement */
-        volatile ReplicationSlot *slot = MyReplicationSlot;
-
-        SpinLockAcquire(&slot->mutex);
+        SpinLockAcquire(&MyReplicationSlot->mutex);
 
-        slot->data.confirmed_flush = lsn;
+        MyReplicationSlot->data.confirmed_flush = lsn;
 
         /* if we're past the location required for bumping xmin, do so */
-        if (slot->candidate_xmin_lsn != InvalidXLogRecPtr &&
-            slot->candidate_xmin_lsn <= lsn)
+        if (MyReplicationSlot->candidate_xmin_lsn != InvalidXLogRecPtr &&
+            MyReplicationSlot->candidate_xmin_lsn <= lsn)
         {
             /*
              * We have to write the changed xmin to disk *before* we change
@@ -868,28 +865,28 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
              * ->effective_xmin once the new state is synced to disk. After a
              * crash ->effective_xmin is set to ->xmin.
              */
-            if (TransactionIdIsValid(slot->candidate_catalog_xmin) &&
-                slot->data.catalog_xmin != slot->candidate_catalog_xmin)
+            if (TransactionIdIsValid(MyReplicationSlot->candidate_catalog_xmin) &&
+                MyReplicationSlot->data.catalog_xmin != MyReplicationSlot->candidate_catalog_xmin)
             {
-                slot->data.catalog_xmin = slot->candidate_catalog_xmin;
-                slot->candidate_catalog_xmin = InvalidTransactionId;
-                slot->candidate_xmin_lsn = InvalidXLogRecPtr;
+                MyReplicationSlot->data.catalog_xmin = MyReplicationSlot->candidate_catalog_xmin;
+                MyReplicationSlot->candidate_catalog_xmin = InvalidTransactionId;
+                MyReplicationSlot->candidate_xmin_lsn = InvalidXLogRecPtr;
                 updated_xmin = true;
             }
         }
 
-        if (slot->candidate_restart_valid != InvalidXLogRecPtr &&
-            slot->candidate_restart_valid <= lsn)
+        if (MyReplicationSlot->candidate_restart_valid != InvalidXLogRecPtr &&
+            MyReplicationSlot->candidate_restart_valid <= lsn)
         {
-            Assert(slot->candidate_restart_lsn != InvalidXLogRecPtr);
+            Assert(MyReplicationSlot->candidate_restart_lsn != InvalidXLogRecPtr);
 
-            slot->data.restart_lsn = slot->candidate_restart_lsn;
-            slot->candidate_restart_lsn = InvalidXLogRecPtr;
-            slot->candidate_restart_valid = InvalidXLogRecPtr;
+            MyReplicationSlot->data.restart_lsn = MyReplicationSlot->candidate_restart_lsn;
+            MyReplicationSlot->candidate_restart_lsn = InvalidXLogRecPtr;
+            MyReplicationSlot->candidate_restart_valid = InvalidXLogRecPtr;
             updated_restart = true;
         }
 
-        SpinLockRelease(&slot->mutex);
+        SpinLockRelease(&MyReplicationSlot->mutex);
 
         /* first write new xmin to disk, so we know what's up after a crash */
         if (updated_xmin || updated_restart)
@@ -907,9 +904,9 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
          */
         if (updated_xmin)
         {
-            SpinLockAcquire(&slot->mutex);
-            slot->effective_catalog_xmin = slot->data.catalog_xmin;
-            SpinLockRelease(&slot->mutex);
+            SpinLockAcquire(&MyReplicationSlot->mutex);
+            MyReplicationSlot->effective_catalog_xmin = MyReplicationSlot->data.catalog_xmin;
+            SpinLockRelease(&MyReplicationSlot->mutex);
 
             ReplicationSlotsComputeRequiredXmin(false);
             ReplicationSlotsComputeRequiredLSN();
@@ -917,10 +914,8 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
     }
     else
     {
-        volatile ReplicationSlot *slot = MyReplicationSlot;
-
-        SpinLockAcquire(&slot->mutex);
-        slot->data.confirmed_flush = lsn;
-        SpinLockRelease(&slot->mutex);
+        SpinLockAcquire(&MyReplicationSlot->mutex);
+        MyReplicationSlot->data.confirmed_flush = lsn;
+        SpinLockRelease(&MyReplicationSlot->mutex);
     }
 }

src/backend/replication/slot.c

@@ -288,15 +288,11 @@ ReplicationSlotCreate(const char *name, bool db_specific,
     slot->in_use = true;
 
     /* We can now mark the slot active, and that makes it our slot. */
-    {
-        volatile ReplicationSlot *vslot = slot;
-
-        SpinLockAcquire(&slot->mutex);
-        Assert(vslot->active_pid == 0);
-        vslot->active_pid = MyProcPid;
-        SpinLockRelease(&slot->mutex);
-        MyReplicationSlot = slot;
-    }
+    SpinLockAcquire(&slot->mutex);
+    Assert(slot->active_pid == 0);
+    slot->active_pid = MyProcPid;
+    SpinLockRelease(&slot->mutex);
+    MyReplicationSlot = slot;
 
     LWLockRelease(ReplicationSlotControlLock);
@@ -329,12 +325,10 @@ ReplicationSlotAcquire(const char *name)
         if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
         {
-            volatile ReplicationSlot *vslot = s;
-
             SpinLockAcquire(&s->mutex);
-            active_pid = vslot->active_pid;
+            active_pid = s->active_pid;
             if (active_pid == 0)
-                vslot->active_pid = MyProcPid;
+                s->active_pid = MyProcPid;
             SpinLockRelease(&s->mutex);
             slot = s;
             break;
@@ -380,10 +374,8 @@ ReplicationSlotRelease(void)
     else
     {
         /* Mark slot inactive. We're not freeing it, just disconnecting. */
-        volatile ReplicationSlot *vslot = slot;
-
         SpinLockAcquire(&slot->mutex);
-        vslot->active_pid = 0;
+        slot->active_pid = 0;
         SpinLockRelease(&slot->mutex);
     }
@@ -459,11 +451,10 @@ ReplicationSlotDropAcquired(void)
     }
     else
     {
-        volatile ReplicationSlot *vslot = slot;
         bool        fail_softly = slot->data.persistency == RS_EPHEMERAL;
 
         SpinLockAcquire(&slot->mutex);
-        vslot->active_pid = 0;
+        slot->active_pid = 0;
         SpinLockRelease(&slot->mutex);
 
         ereport(fail_softly ? WARNING : ERROR,
@@ -533,16 +524,13 @@ ReplicationSlotSave(void)
 void
 ReplicationSlotMarkDirty(void)
 {
+    ReplicationSlot *slot = MyReplicationSlot;
+
     Assert(MyReplicationSlot != NULL);
 
-    {
-        volatile ReplicationSlot *vslot = MyReplicationSlot;
-
-        SpinLockAcquire(&vslot->mutex);
-        MyReplicationSlot->just_dirtied = true;
-        MyReplicationSlot->dirty = true;
-        SpinLockRelease(&vslot->mutex);
-    }
+    SpinLockAcquire(&slot->mutex);
+    MyReplicationSlot->just_dirtied = true;
+    MyReplicationSlot->dirty = true;
+    SpinLockRelease(&slot->mutex);
 }
 
 /*
@@ -557,13 +545,9 @@ ReplicationSlotPersist(void)
     Assert(slot != NULL);
     Assert(slot->data.persistency != RS_PERSISTENT);
 
-    {
-        volatile ReplicationSlot *vslot = slot;
-
-        SpinLockAcquire(&slot->mutex);
-        vslot->data.persistency = RS_PERSISTENT;
-        SpinLockRelease(&slot->mutex);
-    }
+    SpinLockAcquire(&slot->mutex);
+    slot->data.persistency = RS_PERSISTENT;
+    SpinLockRelease(&slot->mutex);
 
     ReplicationSlotMarkDirty();
     ReplicationSlotSave();
@@ -593,14 +577,10 @@ ReplicationSlotsComputeRequiredXmin(bool already_locked)
         if (!s->in_use)
             continue;
 
-        {
-            volatile ReplicationSlot *vslot = s;
-
-            SpinLockAcquire(&s->mutex);
-            effective_xmin = vslot->effective_xmin;
-            effective_catalog_xmin = vslot->effective_catalog_xmin;
-            SpinLockRelease(&s->mutex);
-        }
+        SpinLockAcquire(&s->mutex);
+        effective_xmin = s->effective_xmin;
+        effective_catalog_xmin = s->effective_catalog_xmin;
+        SpinLockRelease(&s->mutex);
 
         /* check the data xmin */
         if (TransactionIdIsValid(effective_xmin) &&
@@ -641,13 +621,9 @@ ReplicationSlotsComputeRequiredLSN(void)
         if (!s->in_use)
             continue;
 
-        {
-            volatile ReplicationSlot *vslot = s;
-
-            SpinLockAcquire(&s->mutex);
-            restart_lsn = vslot->data.restart_lsn;
-            SpinLockRelease(&s->mutex);
-        }
+        SpinLockAcquire(&s->mutex);
+        restart_lsn = s->data.restart_lsn;
+        SpinLockRelease(&s->mutex);
 
         if (restart_lsn != InvalidXLogRecPtr &&
             (min_required == InvalidXLogRecPtr ||
@@ -684,7 +660,7 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
     for (i = 0; i < max_replication_slots; i++)
     {
-        volatile ReplicationSlot *s;
+        ReplicationSlot *s;
         XLogRecPtr  restart_lsn;
 
         s = &ReplicationSlotCtl->replication_slots[i];
@@ -733,7 +709,7 @@ ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive)
     LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
     for (i = 0; i < max_replication_slots; i++)
     {
-        volatile ReplicationSlot *s;
+        ReplicationSlot *s;
 
         s = &ReplicationSlotCtl->replication_slots[i];
@@ -1023,14 +999,10 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
     bool        was_dirty;
 
     /* first check whether there's something to write out */
-    {
-        volatile ReplicationSlot *vslot = slot;
-
-        SpinLockAcquire(&vslot->mutex);
-        was_dirty = vslot->dirty;
-        vslot->just_dirtied = false;
-        SpinLockRelease(&vslot->mutex);
-    }
+    SpinLockAcquire(&slot->mutex);
+    was_dirty = slot->dirty;
+    slot->just_dirtied = false;
+    SpinLockRelease(&slot->mutex);
 
     /* and don't do anything if there's nothing to write */
     if (!was_dirty)
@@ -1124,14 +1096,10 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
      * Successfully wrote, unset dirty bit, unless somebody dirtied again
      * already.
      */
-    {
-        volatile ReplicationSlot *vslot = slot;
-
-        SpinLockAcquire(&vslot->mutex);
-        if (!vslot->just_dirtied)
-            vslot->dirty = false;
-        SpinLockRelease(&vslot->mutex);
-    }
+    SpinLockAcquire(&slot->mutex);
+    if (!slot->just_dirtied)
+        slot->dirty = false;
+    SpinLockRelease(&slot->mutex);
 
     LWLockRelease(slot->io_in_progress_lock);
 }

src/backend/replication/walreceiver.c

@@ -192,9 +192,7 @@ WalReceiverMain(void)
     TimeLineID  startpointTLI;
     TimeLineID  primaryTLI;
     bool        first_stream;
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     TimestampTz last_recv_timestamp;
     bool        ping_sent;
@@ -559,8 +557,7 @@ WalReceiverMain(void)
 static void
 WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     int         state;
 
     SpinLockAcquire(&walrcv->mutex);
@@ -693,8 +690,7 @@ WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
 static void
 WalRcvDie(int code, Datum arg)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
 
     /* Ensure that all WAL records received are flushed to disk */
     XLogWalRcvFlush(true);
@@ -974,8 +970,7 @@ XLogWalRcvFlush(bool dying)
 {
     if (LogstreamResult.Flush < LogstreamResult.Write)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalRcvData *walrcv = WalRcv;
+        WalRcvData *walrcv = WalRcv;
 
         issue_xlog_fsync(recvFile, recvSegNo);
@@ -1179,8 +1174,7 @@ XLogWalRcvSendHSFeedback(bool immed)
 static void
 ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
 
     TimestampTz lastMsgReceiptTime = GetCurrentTimestamp();

src/backend/replication/walreceiverfuncs.c

@@ -72,8 +72,7 @@ WalRcvShmemInit(void)
 bool
 WalRcvRunning(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     WalRcvState state;
     pg_time_t   startTime;
@@ -118,8 +117,7 @@ WalRcvRunning(void)
 bool
 WalRcvStreaming(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     WalRcvState state;
     pg_time_t   startTime;
@@ -165,8 +163,7 @@ WalRcvStreaming(void)
 void
 ShutdownWalRcv(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     pid_t       walrcvpid = 0;
 
     /*
@@ -227,8 +224,7 @@ void
 RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
                      const char *slotname)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     bool        launch = false;
     pg_time_t   now = (pg_time_t) time(NULL);
@@ -298,8 +294,7 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
 XLogRecPtr
 GetWalRcvWriteRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     XLogRecPtr  recptr;
 
     SpinLockAcquire(&walrcv->mutex);
@@ -320,9 +315,7 @@ GetWalRcvWriteRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
 int
 GetReplicationApplyDelay(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
-
+    WalRcvData *walrcv = WalRcv;
     XLogRecPtr  receivePtr;
     XLogRecPtr  replayPtr;
@@ -359,8 +352,7 @@ GetReplicationApplyDelay(void)
 int
 GetReplicationTransferLatency(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalRcvData *walrcv = WalRcv;
+    WalRcvData *walrcv = WalRcv;
     TimestampTz lastMsgSendTime;
     TimestampTz lastMsgReceiptTime;

src/backend/replication/walsender.c

@@ -641,8 +641,7 @@ StartReplication(StartReplicationCmd *cmd)
     /* Initialize shared memory status, too */
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = MyWalSnd;
+        WalSnd     *walsnd = MyWalSnd;
 
         SpinLockAcquire(&walsnd->mutex);
         walsnd->sentPtr = sentPtr;
@@ -990,8 +989,7 @@ StartLogicalReplication(StartReplicationCmd *cmd)
     /* Also update the sent position status in shared memory */
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = MyWalSnd;
+        WalSnd     *walsnd = MyWalSnd;
 
         SpinLockAcquire(&walsnd->mutex);
         walsnd->sentPtr = MyReplicationSlot->data.restart_lsn;
@@ -1494,9 +1492,7 @@ static void
 PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
 {
     bool        changed = false;
-
-    /* use volatile pointer to prevent code rearrangement */
-    volatile ReplicationSlot *slot = MyReplicationSlot;
+    ReplicationSlot *slot = MyReplicationSlot;
 
     Assert(lsn != InvalidXLogRecPtr);
     SpinLockAcquire(&slot->mutex);
@@ -1554,8 +1550,7 @@ ProcessStandbyReplyMessage(void)
      * standby.
      */
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = MyWalSnd;
+        WalSnd     *walsnd = MyWalSnd;
 
         SpinLockAcquire(&walsnd->mutex);
         walsnd->write = writePtr;
@@ -1584,7 +1579,7 @@ static void
 PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin)
 {
     bool        changed = false;
-    volatile ReplicationSlot *slot = MyReplicationSlot;
+    ReplicationSlot *slot = MyReplicationSlot;
 
     SpinLockAcquire(&slot->mutex);
     MyPgXact->xmin = InvalidTransactionId;
@@ -1934,8 +1929,7 @@ InitWalSenderSlot(void)
      */
     for (i = 0; i < max_wal_senders; i++)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+        WalSnd     *walsnd = &WalSndCtl->walsnds[i];
 
         SpinLockAcquire(&walsnd->mutex);
@@ -2145,8 +2139,7 @@ retry:
      */
     if (am_cascading_walsender)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = MyWalSnd;
+        WalSnd     *walsnd = MyWalSnd;
         bool        reload;
 
         SpinLockAcquire(&walsnd->mutex);
@@ -2384,8 +2377,7 @@ XLogSendPhysical(void)
     /* Update shared memory status */
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = MyWalSnd;
+        WalSnd     *walsnd = MyWalSnd;
 
         SpinLockAcquire(&walsnd->mutex);
         walsnd->sentPtr = sentPtr;
@@ -2447,8 +2439,7 @@ XLogSendLogical(void)
     /* Update shared memory status */
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = MyWalSnd;
+        WalSnd     *walsnd = MyWalSnd;
 
         SpinLockAcquire(&walsnd->mutex);
         walsnd->sentPtr = sentPtr;
@@ -2539,8 +2530,7 @@ WalSndRqstFileReload(void)
     for (i = 0; i < max_wal_senders; i++)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+        WalSnd     *walsnd = &WalSndCtl->walsnds[i];
 
         if (walsnd->pid == 0)
             continue;
@@ -2692,8 +2682,7 @@ WalSndWakeup(void)
 void
 WalSndSetState(WalSndState state)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile WalSnd *walsnd = MyWalSnd;
+    WalSnd     *walsnd = MyWalSnd;
 
     Assert(am_walsender);
@@ -2777,8 +2766,7 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
     for (i = 0; i < max_wal_senders; i++)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+        WalSnd     *walsnd = &WalSndCtl->walsnds[i];
         XLogRecPtr  sentPtr;
         XLogRecPtr  write;
         XLogRecPtr  flush;
@@ -2934,8 +2922,7 @@ GetOldestWALSendPointer(void)
     for (i = 0; i < max_wal_senders; i++)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+        WalSnd     *walsnd = &WalSndCtl->walsnds[i];
         XLogRecPtr  recptr;
 
         if (walsnd->pid == 0)