Remove volatile qualifiers from lwlock.c.

Now that spinlocks (hopefully!) act as compiler barriers, as of commit
0709b7ee72, removing the volatile qualifiers should be safe.  This
serves as a demonstration of the new coding style, and may be optimized
better on some machines as well.
Author: Robert Haas
Date:   2014-09-22 16:42:14 -04:00
Parent commit: e38da8d6b1
This commit:   df4077cda2

View File

@ -112,7 +112,7 @@ static lwlock_stats lwlock_stats_dummy;
bool Trace_lwlocks = false; bool Trace_lwlocks = false;
inline static void inline static void
PRINT_LWDEBUG(const char *where, const volatile LWLock *lock) PRINT_LWDEBUG(const char *where, const LWLock *lock)
{ {
if (Trace_lwlocks) if (Trace_lwlocks)
elog(LOG, "%s(%s %d): excl %d shared %d head %p rOK %d", elog(LOG, "%s(%s %d): excl %d shared %d head %p rOK %d",
@ -406,9 +406,7 @@ LWLock *
LWLockAssign(void) LWLockAssign(void)
{ {
LWLock *result; LWLock *result;
int *LWLockCounter;
/* use volatile pointer to prevent code rearrangement */
volatile int *LWLockCounter;
LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int)); LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
SpinLockAcquire(ShmemLock); SpinLockAcquire(ShmemLock);
@ -429,9 +427,7 @@ int
LWLockNewTrancheId(void) LWLockNewTrancheId(void)
{ {
int result; int result;
int *LWLockCounter;
/* use volatile pointer to prevent code rearrangement */
volatile int *LWLockCounter;
LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int)); LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
SpinLockAcquire(ShmemLock); SpinLockAcquire(ShmemLock);
@ -511,9 +507,8 @@ LWLockAcquireWithVar(LWLock *l, uint64 *valptr, uint64 val)
/* internal function to implement LWLockAcquire and LWLockAcquireWithVar */ /* internal function to implement LWLockAcquire and LWLockAcquireWithVar */
static inline bool static inline bool
LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val) LWLockAcquireCommon(LWLock *lock, LWLockMode mode, uint64 *valptr, uint64 val)
{ {
volatile LWLock *lock = l;
PGPROC *proc = MyProc; PGPROC *proc = MyProc;
bool retry = false; bool retry = false;
bool result = true; bool result = true;
@ -525,7 +520,7 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val)
PRINT_LWDEBUG("LWLockAcquire", lock); PRINT_LWDEBUG("LWLockAcquire", lock);
#ifdef LWLOCK_STATS #ifdef LWLOCK_STATS
lwstats = get_lwlock_stats_entry(l); lwstats = get_lwlock_stats_entry(lock);
/* Count lock acquisition attempts */ /* Count lock acquisition attempts */
if (mode == LW_EXCLUSIVE) if (mode == LW_EXCLUSIVE)
@ -642,13 +637,13 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val)
* so that the lock manager or signal manager will see the received * so that the lock manager or signal manager will see the received
* signal when it next waits. * signal when it next waits.
*/ */
LOG_LWDEBUG("LWLockAcquire", T_NAME(l), T_ID(l), "waiting"); LOG_LWDEBUG("LWLockAcquire", T_NAME(lock), T_ID(lock), "waiting");
#ifdef LWLOCK_STATS #ifdef LWLOCK_STATS
lwstats->block_count++; lwstats->block_count++;
#endif #endif
TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), T_ID(lock), mode);
for (;;) for (;;)
{ {
@ -659,9 +654,9 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val)
extraWaits++; extraWaits++;
} }
TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), T_ID(lock), mode);
LOG_LWDEBUG("LWLockAcquire", T_NAME(l), T_ID(l), "awakened"); LOG_LWDEBUG("LWLockAcquire", T_NAME(lock), T_ID(lock), "awakened");
/* Now loop back and try to acquire lock again. */ /* Now loop back and try to acquire lock again. */
retry = true; retry = true;
@ -675,10 +670,10 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val)
/* We are done updating shared state of the lock itself. */ /* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex); SpinLockRelease(&lock->mutex);
TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), T_ID(lock), mode);
/* Add lock to list of locks held by this backend */ /* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = l; held_lwlocks[num_held_lwlocks++] = lock;
/* /*
* Fix the process wait semaphore's count for any absorbed wakeups. * Fix the process wait semaphore's count for any absorbed wakeups.
@ -697,9 +692,8 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val)
* If successful, cancel/die interrupts are held off until lock release. * If successful, cancel/die interrupts are held off until lock release.
*/ */
bool bool
LWLockConditionalAcquire(LWLock *l, LWLockMode mode) LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
{ {
volatile LWLock *lock = l;
bool mustwait; bool mustwait;
PRINT_LWDEBUG("LWLockConditionalAcquire", lock); PRINT_LWDEBUG("LWLockConditionalAcquire", lock);
@ -747,14 +741,16 @@ LWLockConditionalAcquire(LWLock *l, LWLockMode mode)
{ {
/* Failed to get lock, so release interrupt holdoff */ /* Failed to get lock, so release interrupt holdoff */
RESUME_INTERRUPTS(); RESUME_INTERRUPTS();
LOG_LWDEBUG("LWLockConditionalAcquire", T_NAME(l), T_ID(l), "failed"); LOG_LWDEBUG("LWLockConditionalAcquire",
TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(l), T_ID(l), mode); T_NAME(lock), T_ID(lock), "failed");
TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock),
T_ID(lock), mode);
} }
else else
{ {
/* Add lock to list of locks held by this backend */ /* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = l; held_lwlocks[num_held_lwlocks++] = lock;
TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), T_ID(lock), mode);
} }
return !mustwait; return !mustwait;
@ -775,9 +771,8 @@ LWLockConditionalAcquire(LWLock *l, LWLockMode mode)
* wake up, observe that their records have already been flushed, and return. * wake up, observe that their records have already been flushed, and return.
*/ */
bool bool
LWLockAcquireOrWait(LWLock *l, LWLockMode mode) LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
{ {
volatile LWLock *lock = l;
PGPROC *proc = MyProc; PGPROC *proc = MyProc;
bool mustwait; bool mustwait;
int extraWaits = 0; int extraWaits = 0;
@ -788,7 +783,7 @@ LWLockAcquireOrWait(LWLock *l, LWLockMode mode)
PRINT_LWDEBUG("LWLockAcquireOrWait", lock); PRINT_LWDEBUG("LWLockAcquireOrWait", lock);
#ifdef LWLOCK_STATS #ifdef LWLOCK_STATS
lwstats = get_lwlock_stats_entry(l); lwstats = get_lwlock_stats_entry(lock);
#endif #endif
/* Ensure we will have room to remember the lock */ /* Ensure we will have room to remember the lock */
@ -855,13 +850,14 @@ LWLockAcquireOrWait(LWLock *l, LWLockMode mode)
* Wait until awakened. Like in LWLockAcquire, be prepared for bogus * Wait until awakened. Like in LWLockAcquire, be prepared for bogus
* wakeups, because we share the semaphore with ProcWaitForSignal. * wakeups, because we share the semaphore with ProcWaitForSignal.
*/ */
LOG_LWDEBUG("LWLockAcquireOrWait", T_NAME(l), T_ID(l), "waiting"); LOG_LWDEBUG("LWLockAcquireOrWait", T_NAME(lock), T_ID(lock),
"waiting");
#ifdef LWLOCK_STATS #ifdef LWLOCK_STATS
lwstats->block_count++; lwstats->block_count++;
#endif #endif
TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), T_ID(lock), mode);
for (;;) for (;;)
{ {
@ -872,9 +868,10 @@ LWLockAcquireOrWait(LWLock *l, LWLockMode mode)
extraWaits++; extraWaits++;
} }
TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), T_ID(lock), mode);
LOG_LWDEBUG("LWLockAcquireOrWait", T_NAME(l), T_ID(l), "awakened"); LOG_LWDEBUG("LWLockAcquireOrWait", T_NAME(lock), T_ID(lock),
"awakened");
} }
else else
{ {
@ -892,14 +889,16 @@ LWLockAcquireOrWait(LWLock *l, LWLockMode mode)
{ {
/* Failed to get lock, so release interrupt holdoff */ /* Failed to get lock, so release interrupt holdoff */
RESUME_INTERRUPTS(); RESUME_INTERRUPTS();
LOG_LWDEBUG("LWLockAcquireOrWait", T_NAME(l), T_ID(l), "failed"); LOG_LWDEBUG("LWLockAcquireOrWait", T_NAME(lock), T_ID(lock), "failed");
TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), T_ID(lock),
mode);
} }
else else
{ {
/* Add lock to list of locks held by this backend */ /* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = l; held_lwlocks[num_held_lwlocks++] = lock;
TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(l), T_ID(l), mode); TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), T_ID(lock),
mode);
} }
return !mustwait; return !mustwait;
@ -924,10 +923,8 @@ LWLockAcquireOrWait(LWLock *l, LWLockMode mode)
* in shared mode, returns 'true'. * in shared mode, returns 'true'.
*/ */
bool bool
LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval) LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
{ {
volatile LWLock *lock = l;
volatile uint64 *valp = valptr;
PGPROC *proc = MyProc; PGPROC *proc = MyProc;
int extraWaits = 0; int extraWaits = 0;
bool result = false; bool result = false;
@ -938,7 +935,7 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
PRINT_LWDEBUG("LWLockWaitForVar", lock); PRINT_LWDEBUG("LWLockWaitForVar", lock);
#ifdef LWLOCK_STATS #ifdef LWLOCK_STATS
lwstats = get_lwlock_stats_entry(l); lwstats = get_lwlock_stats_entry(lock);
#endif /* LWLOCK_STATS */ #endif /* LWLOCK_STATS */
/* /*
@ -981,7 +978,7 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
} }
else else
{ {
value = *valp; value = *valptr;
if (value != oldval) if (value != oldval)
{ {
result = false; result = false;
@ -1023,13 +1020,14 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
* so that the lock manager or signal manager will see the received * so that the lock manager or signal manager will see the received
* signal when it next waits. * signal when it next waits.
*/ */
LOG_LWDEBUG("LWLockWaitForVar", T_NAME(l), T_ID(l), "waiting"); LOG_LWDEBUG("LWLockWaitForVar", T_NAME(lock), T_ID(lock), "waiting");
#ifdef LWLOCK_STATS #ifdef LWLOCK_STATS
lwstats->block_count++; lwstats->block_count++;
#endif #endif
TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(l), T_ID(l), LW_EXCLUSIVE); TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), T_ID(lock),
LW_EXCLUSIVE);
for (;;) for (;;)
{ {
@ -1040,9 +1038,10 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
extraWaits++; extraWaits++;
} }
TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(l), T_ID(l), LW_EXCLUSIVE); TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), T_ID(lock),
LW_EXCLUSIVE);
LOG_LWDEBUG("LWLockWaitForVar", T_NAME(l), T_ID(l), "awakened"); LOG_LWDEBUG("LWLockWaitForVar", T_NAME(lock), T_ID(lock), "awakened");
/* Now loop back and check the status of the lock again. */ /* Now loop back and check the status of the lock again. */
} }
@ -1050,7 +1049,7 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
/* We are done updating shared state of the lock itself. */ /* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex); SpinLockRelease(&lock->mutex);
TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(l), T_ID(l), LW_EXCLUSIVE); TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), T_ID(lock), LW_EXCLUSIVE);
/* /*
* Fix the process wait semaphore's count for any absorbed wakeups. * Fix the process wait semaphore's count for any absorbed wakeups.
@ -1078,10 +1077,8 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
* The caller must be holding the lock in exclusive mode. * The caller must be holding the lock in exclusive mode.
*/ */
void void
LWLockUpdateVar(LWLock *l, uint64 *valptr, uint64 val) LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
{ {
volatile LWLock *lock = l;
volatile uint64 *valp = valptr;
PGPROC *head; PGPROC *head;
PGPROC *proc; PGPROC *proc;
PGPROC *next; PGPROC *next;
@ -1093,7 +1090,7 @@ LWLockUpdateVar(LWLock *l, uint64 *valptr, uint64 val)
Assert(lock->exclusive == 1); Assert(lock->exclusive == 1);
/* Update the lock's value */ /* Update the lock's value */
*valp = val; *valptr = val;
/* /*
* See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
@ -1139,9 +1136,8 @@ LWLockUpdateVar(LWLock *l, uint64 *valptr, uint64 val)
* LWLockRelease - release a previously acquired lock * LWLockRelease - release a previously acquired lock
*/ */
void void
LWLockRelease(LWLock *l) LWLockRelease(LWLock *lock)
{ {
volatile LWLock *lock = l;
PGPROC *head; PGPROC *head;
PGPROC *proc; PGPROC *proc;
int i; int i;
@ -1154,11 +1150,11 @@ LWLockRelease(LWLock *l)
*/ */
for (i = num_held_lwlocks; --i >= 0;) for (i = num_held_lwlocks; --i >= 0;)
{ {
if (l == held_lwlocks[i]) if (lock == held_lwlocks[i])
break; break;
} }
if (i < 0) if (i < 0)
elog(ERROR, "lock %s %d is not held", T_NAME(l), T_ID(l)); elog(ERROR, "lock %s %d is not held", T_NAME(lock), T_ID(lock));
num_held_lwlocks--; num_held_lwlocks--;
for (; i < num_held_lwlocks; i++) for (; i < num_held_lwlocks; i++)
held_lwlocks[i] = held_lwlocks[i + 1]; held_lwlocks[i] = held_lwlocks[i + 1];
@ -1238,14 +1234,15 @@ LWLockRelease(LWLock *l)
/* We are done updating shared state of the lock itself. */ /* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex); SpinLockRelease(&lock->mutex);
TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(l), T_ID(l)); TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock), T_ID(lock));
/* /*
* Awaken any waiters I removed from the queue. * Awaken any waiters I removed from the queue.
*/ */
while (head != NULL) while (head != NULL)
{ {
LOG_LWDEBUG("LWLockRelease", T_NAME(l), T_ID(l), "release waiter"); LOG_LWDEBUG("LWLockRelease", T_NAME(lock), T_ID(lock),
"release waiter");
proc = head; proc = head;
head = proc->lwWaitLink; head = proc->lwWaitLink;
proc->lwWaitLink = NULL; proc->lwWaitLink = NULL;