/*-------------------------------------------------------------------------
 *
 * lwlock.c
 *	  Lightweight lock manager
 *
 * Lightweight locks are intended primarily to provide mutual exclusion of
 * access to shared-memory data structures.  Therefore, they offer both
 * exclusive and shared lock modes (to support read/write and read-only
 * access to a shared object).  There are few other frammishes.  User-level
 * locking should be done with the full lock manager --- which depends on
 * an LWLock to protect its shared state.
 *
 *
 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.11 2002/06/11 13:40:51 wieck Exp $
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
#include "postgres.h"
|
|
|
|
|
|
|
|
#include "access/clog.h"
|
|
|
|
#include "storage/lwlock.h"
|
|
|
|
#include "storage/proc.h"
|
|
|
|
#include "storage/spin.h"
|
|
|
|
|
|
|
|
|
|
|
|
typedef struct LWLock
|
|
|
|
{
|
2002-06-11 15:40:53 +02:00
|
|
|
slock_t mutex; /* Protects LWLock and queue of PGPROCs */
|
2002-01-07 17:33:00 +01:00
|
|
|
bool releaseOK; /* T if ok to release waiters */
|
2001-09-29 06:02:27 +02:00
|
|
|
char exclusive; /* # of exclusive holders (0 or 1) */
|
|
|
|
int shared; /* # of shared holders (0..MaxBackends) */
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *head; /* head of list of waiting PGPROCs */
|
|
|
|
PGPROC *tail; /* tail of list of waiting PGPROCs */
|
2001-09-29 06:02:27 +02:00
|
|
|
/* tail is undefined when head is NULL */
|
|
|
|
} LWLock;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This points to the array of LWLocks in shared memory. Backends inherit
|
|
|
|
* the pointer by fork from the postmaster. LWLockIds are indexes into
|
|
|
|
* the array.
|
|
|
|
*/
|
|
|
|
static LWLock *LWLockArray = NULL;
|
/*
 * Shared counter used by LWLockAssign to hand out dynamically-allocated
 * LWLockIds.  Points at two ints stored just past the lock array:
 * [0] = next id to give out, [1] = total number of locks (the limit).
 */
static int *LWLockCounter;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We use this structure to keep track of locked LWLocks for release
|
|
|
|
* during error recovery. The maximum size could be determined at runtime
|
|
|
|
* if necessary, but it seems unlikely that more than a few locks could
|
|
|
|
* ever be held simultaneously.
|
|
|
|
*/
|
|
|
|
#define MAX_SIMUL_LWLOCKS 100
|
|
|
|
|
2001-10-25 07:50:21 +02:00
|
|
|
static int num_held_lwlocks = 0;
|
|
|
|
static LWLockId held_lwlocks[MAX_SIMUL_LWLOCKS];
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
|
|
|
|
#ifdef LOCK_DEBUG

/* Set true (e.g. via debugger) to trace every LWLock operation */
bool		Trace_lwlocks = false;

/* Dump the full state of a lock at entry to an LWLock primitive */
inline static void
PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
{
	if (Trace_lwlocks)
		elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
			 where, (int) lockid,
			 (int) lock->exclusive, lock->shared, lock->head,
			 (int) lock->releaseOK);
}

/* Emit a short free-form trace message for a lock operation */
inline static void
LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
{
	if (Trace_lwlocks)
		elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}

#else							/* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
#define LOG_LWDEBUG(a,b,c)
#endif   /* LOCK_DEBUG */
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute number of LWLocks to allocate.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
NumLWLocks(void)
|
|
|
|
{
|
2001-10-25 07:50:21 +02:00
|
|
|
int numLocks;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
/*
|
2001-10-25 07:50:21 +02:00
|
|
|
* Possibly this logic should be spread out among the affected
|
|
|
|
* modules, the same way that shmem space estimation is done. But for
|
|
|
|
* now, there are few enough users of LWLocks that we can get away
|
|
|
|
* with just keeping the knowledge here.
|
2001-09-29 06:02:27 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Predefined LWLocks */
|
|
|
|
numLocks = (int) NumFixedLWLocks;
|
|
|
|
|
|
|
|
/* bufmgr.c needs two for each shared buffer */
|
|
|
|
numLocks += 2 * NBuffers;
|
|
|
|
|
|
|
|
/* clog.c needs one per CLOG buffer */
|
|
|
|
numLocks += NUM_CLOG_BUFFERS;
|
|
|
|
|
|
|
|
/* Perhaps create a few more for use by user-defined modules? */
|
|
|
|
|
|
|
|
return numLocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute shmem space needed for LWLocks.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
LWLockShmemSize(void)
|
|
|
|
{
|
2001-10-25 07:50:21 +02:00
|
|
|
int numLocks = NumLWLocks();
|
|
|
|
uint32 spaceLocks;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
/* Allocate the LWLocks plus space for shared allocation counter. */
|
|
|
|
spaceLocks = numLocks * sizeof(LWLock) + 2 * sizeof(int);
|
|
|
|
spaceLocks = MAXALIGN(spaceLocks);
|
|
|
|
|
|
|
|
return (int) spaceLocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate shmem space for LWLocks and initialize the locks.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
CreateLWLocks(void)
|
|
|
|
{
|
2001-10-25 07:50:21 +02:00
|
|
|
int numLocks = NumLWLocks();
|
|
|
|
uint32 spaceLocks = LWLockShmemSize();
|
|
|
|
LWLock *lock;
|
|
|
|
int id;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
/* Allocate space */
|
|
|
|
LWLockArray = (LWLock *) ShmemAlloc(spaceLocks);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize all LWLocks to "unlocked" state
|
|
|
|
*/
|
|
|
|
for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
|
|
|
|
{
|
|
|
|
SpinLockInit(&lock->mutex);
|
2002-01-07 17:33:00 +01:00
|
|
|
lock->releaseOK = true;
|
2001-09-29 06:02:27 +02:00
|
|
|
lock->exclusive = 0;
|
|
|
|
lock->shared = 0;
|
|
|
|
lock->head = NULL;
|
|
|
|
lock->tail = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the dynamic-allocation counter at the end of the array
|
|
|
|
*/
|
|
|
|
LWLockCounter = (int *) lock;
|
|
|
|
LWLockCounter[0] = (int) NumFixedLWLocks;
|
|
|
|
LWLockCounter[1] = numLocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* LWLockAssign - assign a dynamically-allocated LWLock number
|
|
|
|
*
|
|
|
|
* NB: we do not currently try to interlock this. Could perhaps use
|
|
|
|
* ShmemLock spinlock if there were any need to assign LWLockIds after
|
|
|
|
* shmem setup.
|
|
|
|
*/
|
|
|
|
LWLockId
|
|
|
|
LWLockAssign(void)
|
|
|
|
{
|
|
|
|
if (LWLockCounter[0] >= LWLockCounter[1])
|
|
|
|
elog(FATAL, "No more LWLockIds available");
|
|
|
|
return (LWLockId) (LWLockCounter[0]++);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* LWLockAcquire - acquire a lightweight lock in the specified mode
|
|
|
|
*
|
|
|
|
* If the lock is not available, sleep until it is.
|
|
|
|
*
|
|
|
|
* Side effect: cancel/die interrupts are held off until lock release.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
LWLockAcquire(LWLockId lockid, LWLockMode mode)
|
|
|
|
{
|
2001-12-10 22:13:50 +01:00
|
|
|
volatile LWLock *lock = LWLockArray + lockid;
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *proc = MyProc;
|
2002-01-07 17:33:00 +01:00
|
|
|
bool retry = false;
|
|
|
|
int extraWaits = 0;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
PRINT_LWDEBUG("LWLockAcquire", lockid, lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock out cancel/die interrupts until we exit the code section
|
|
|
|
* protected by the LWLock. This ensures that interrupts will not
|
|
|
|
* interfere with manipulations of data structures in shared memory.
|
|
|
|
*/
|
|
|
|
HOLD_INTERRUPTS();
|
|
|
|
|
2002-01-07 17:33:00 +01:00
|
|
|
/*
|
|
|
|
* Loop here to try to acquire lock after each time we are signaled
|
|
|
|
* by LWLockRelease.
|
|
|
|
*
|
|
|
|
* NOTE: it might seem better to have LWLockRelease actually grant us
|
|
|
|
* the lock, rather than retrying and possibly having to go back to
|
|
|
|
* sleep. But in practice that is no good because it means a process
|
|
|
|
* swap for every lock acquisition when two or more processes are
|
|
|
|
* contending for the same lock. Since LWLocks are normally used to
|
|
|
|
* protect not-very-long sections of computation, a process needs to
|
|
|
|
* be able to acquire and release the same lock many times during a
|
|
|
|
* single CPU time slice, even in the presence of contention. The
|
|
|
|
* efficiency of being able to do that outweighs the inefficiency of
|
|
|
|
* sometimes wasting a process dispatch cycle because the lock is not
|
|
|
|
* free when a released waiter finally gets to run. See pgsql-hackers
|
|
|
|
* archives for 29-Dec-01.
|
|
|
|
*/
|
|
|
|
for (;;)
|
2001-12-29 22:30:32 +01:00
|
|
|
{
|
2002-01-07 17:33:00 +01:00
|
|
|
bool mustwait;
|
|
|
|
|
|
|
|
/* Acquire mutex. Time spent holding mutex should be short! */
|
|
|
|
SpinLockAcquire_NoHoldoff(&lock->mutex);
|
|
|
|
|
|
|
|
/* If retrying, allow LWLockRelease to release waiters again */
|
|
|
|
if (retry)
|
|
|
|
lock->releaseOK = true;
|
|
|
|
|
|
|
|
/* If I can get the lock, do so quickly. */
|
|
|
|
if (mode == LW_EXCLUSIVE)
|
2001-09-29 06:02:27 +02:00
|
|
|
{
|
2002-01-07 17:33:00 +01:00
|
|
|
if (lock->exclusive == 0 && lock->shared == 0)
|
|
|
|
{
|
|
|
|
lock->exclusive++;
|
|
|
|
mustwait = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
mustwait = true;
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2002-01-07 17:33:00 +01:00
|
|
|
if (lock->exclusive == 0)
|
|
|
|
{
|
|
|
|
lock->shared++;
|
|
|
|
mustwait = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
mustwait = true;
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
|
2002-01-07 17:33:00 +01:00
|
|
|
if (!mustwait)
|
|
|
|
break; /* got the lock */
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
/*
|
2002-01-07 17:33:00 +01:00
|
|
|
* Add myself to wait queue.
|
|
|
|
*
|
2002-06-11 15:40:53 +02:00
|
|
|
* If we don't have a PGPROC structure, there's no way to wait. This
|
2001-10-25 07:50:21 +02:00
|
|
|
* should never occur, since MyProc should only be null during
|
|
|
|
* shared memory initialization.
|
2001-09-29 06:02:27 +02:00
|
|
|
*/
|
|
|
|
if (proc == NULL)
|
2002-06-11 15:40:53 +02:00
|
|
|
elog(FATAL, "LWLockAcquire: can't wait without a PGPROC structure");
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
proc->lwWaiting = true;
|
|
|
|
proc->lwExclusive = (mode == LW_EXCLUSIVE);
|
|
|
|
proc->lwWaitLink = NULL;
|
|
|
|
if (lock->head == NULL)
|
|
|
|
lock->head = proc;
|
|
|
|
else
|
|
|
|
lock->tail->lwWaitLink = proc;
|
|
|
|
lock->tail = proc;
|
|
|
|
|
|
|
|
/* Can release the mutex now */
|
|
|
|
SpinLockRelease_NoHoldoff(&lock->mutex);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait until awakened.
|
|
|
|
*
|
|
|
|
* Since we share the process wait semaphore with the regular lock
|
2001-10-25 07:50:21 +02:00
|
|
|
* manager and ProcWaitForSignal, and we may need to acquire an
|
2002-01-07 17:33:00 +01:00
|
|
|
* LWLock while one of those is pending, it is possible that we get
|
|
|
|
* awakened for a reason other than being signaled by LWLockRelease.
|
|
|
|
* If so, loop back and wait again. Once we've gotten the LWLock,
|
2001-10-25 07:50:21 +02:00
|
|
|
* re-increment the sema by the number of additional signals
|
|
|
|
* received, so that the lock manager or signal manager will see
|
|
|
|
* the received signal when it next waits.
|
2001-09-29 06:02:27 +02:00
|
|
|
*/
|
2001-12-29 00:26:04 +01:00
|
|
|
LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
|
|
|
|
|
2001-09-29 06:02:27 +02:00
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
/* "false" means cannot accept cancel/die interrupt here. */
|
2002-05-05 02:03:29 +02:00
|
|
|
PGSemaphoreLock(&proc->sem, false);
|
2001-09-29 06:02:27 +02:00
|
|
|
if (!proc->lwWaiting)
|
|
|
|
break;
|
|
|
|
extraWaits++;
|
|
|
|
}
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2001-12-29 00:26:04 +01:00
|
|
|
LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
|
|
|
|
|
2002-01-07 17:33:00 +01:00
|
|
|
/* Now loop back and try to acquire lock again. */
|
|
|
|
retry = true;
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
2001-12-29 22:28:18 +01:00
|
|
|
|
2002-01-07 17:33:00 +01:00
|
|
|
/* We are done updating shared state of the lock itself. */
|
|
|
|
SpinLockRelease_NoHoldoff(&lock->mutex);
|
|
|
|
|
2001-09-29 06:02:27 +02:00
|
|
|
/* Add lock to list of locks held by this backend */
|
|
|
|
Assert(num_held_lwlocks < MAX_SIMUL_LWLOCKS);
|
|
|
|
held_lwlocks[num_held_lwlocks++] = lockid;
|
2002-01-07 17:33:00 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Fix the process wait semaphore's count for any absorbed wakeups.
|
|
|
|
*/
|
|
|
|
while (extraWaits-- > 0)
|
2002-05-05 02:03:29 +02:00
|
|
|
PGSemaphoreUnlock(&proc->sem);
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
|
|
|
|
*
|
|
|
|
* If the lock is not available, return FALSE with no side-effects.
|
|
|
|
*
|
|
|
|
* If successful, cancel/die interrupts are held off until lock release.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
|
|
|
|
{
|
2001-12-10 22:13:50 +01:00
|
|
|
volatile LWLock *lock = LWLockArray + lockid;
|
2001-10-25 07:50:21 +02:00
|
|
|
bool mustwait;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock out cancel/die interrupts until we exit the code section
|
|
|
|
* protected by the LWLock. This ensures that interrupts will not
|
|
|
|
* interfere with manipulations of data structures in shared memory.
|
|
|
|
*/
|
|
|
|
HOLD_INTERRUPTS();
|
|
|
|
|
|
|
|
/* Acquire mutex. Time spent holding mutex should be short! */
|
|
|
|
SpinLockAcquire_NoHoldoff(&lock->mutex);
|
|
|
|
|
|
|
|
/* If I can get the lock, do so quickly. */
|
|
|
|
if (mode == LW_EXCLUSIVE)
|
|
|
|
{
|
|
|
|
if (lock->exclusive == 0 && lock->shared == 0)
|
|
|
|
{
|
|
|
|
lock->exclusive++;
|
|
|
|
mustwait = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
mustwait = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2002-01-07 17:33:00 +01:00
|
|
|
if (lock->exclusive == 0)
|
2001-09-29 06:02:27 +02:00
|
|
|
{
|
|
|
|
lock->shared++;
|
|
|
|
mustwait = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
mustwait = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We are done updating shared state of the lock itself. */
|
|
|
|
SpinLockRelease_NoHoldoff(&lock->mutex);
|
|
|
|
|
|
|
|
if (mustwait)
|
|
|
|
{
|
|
|
|
/* Failed to get lock, so release interrupt holdoff */
|
|
|
|
RESUME_INTERRUPTS();
|
2001-12-29 00:26:04 +01:00
|
|
|
LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Add lock to list of locks held by this backend */
|
|
|
|
Assert(num_held_lwlocks < MAX_SIMUL_LWLOCKS);
|
|
|
|
held_lwlocks[num_held_lwlocks++] = lockid;
|
|
|
|
}
|
|
|
|
|
|
|
|
return !mustwait;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* LWLockRelease - release a previously acquired lock
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
LWLockRelease(LWLockId lockid)
|
|
|
|
{
|
2001-12-10 22:13:50 +01:00
|
|
|
volatile LWLock *lock = LWLockArray + lockid;
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *head;
|
|
|
|
PGPROC *proc;
|
2001-10-25 07:50:21 +02:00
|
|
|
int i;
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
|
|
|
|
|
|
|
|
/*
|
2001-10-25 07:50:21 +02:00
|
|
|
* Remove lock from list of locks held. Usually, but not always, it
|
|
|
|
* will be the latest-acquired lock; so search array backwards.
|
2001-09-29 06:02:27 +02:00
|
|
|
*/
|
2001-10-25 07:50:21 +02:00
|
|
|
for (i = num_held_lwlocks; --i >= 0;)
|
2001-09-29 06:02:27 +02:00
|
|
|
{
|
|
|
|
if (lockid == held_lwlocks[i])
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i < 0)
|
|
|
|
elog(ERROR, "LWLockRelease: lock %d is not held", (int) lockid);
|
|
|
|
num_held_lwlocks--;
|
|
|
|
for (; i < num_held_lwlocks; i++)
|
2001-10-25 07:50:21 +02:00
|
|
|
held_lwlocks[i] = held_lwlocks[i + 1];
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
/* Acquire mutex. Time spent holding mutex should be short! */
|
|
|
|
SpinLockAcquire_NoHoldoff(&lock->mutex);
|
|
|
|
|
|
|
|
/* Release my hold on lock */
|
|
|
|
if (lock->exclusive > 0)
|
|
|
|
lock->exclusive--;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
Assert(lock->shared > 0);
|
|
|
|
lock->shared--;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-10-25 07:50:21 +02:00
|
|
|
* See if I need to awaken any waiters. If I released a non-last
|
2002-01-07 17:33:00 +01:00
|
|
|
* shared hold, there cannot be anything to do. Also, do not awaken
|
|
|
|
* any waiters if someone has already awakened waiters that haven't
|
|
|
|
* yet acquired the lock.
|
2001-09-29 06:02:27 +02:00
|
|
|
*/
|
|
|
|
head = lock->head;
|
|
|
|
if (head != NULL)
|
|
|
|
{
|
2002-01-07 17:33:00 +01:00
|
|
|
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
|
2001-09-29 06:02:27 +02:00
|
|
|
{
|
|
|
|
/*
|
2002-06-11 15:40:53 +02:00
|
|
|
* Remove the to-be-awakened PGPROCs from the queue. If the
|
2002-01-07 17:33:00 +01:00
|
|
|
* front waiter wants exclusive lock, awaken him only.
|
|
|
|
* Otherwise awaken as many waiters as want shared access.
|
2001-09-29 06:02:27 +02:00
|
|
|
*/
|
|
|
|
proc = head;
|
2002-01-07 17:33:00 +01:00
|
|
|
if (!proc->lwExclusive)
|
2001-09-29 06:02:27 +02:00
|
|
|
{
|
|
|
|
while (proc->lwWaitLink != NULL &&
|
|
|
|
!proc->lwWaitLink->lwExclusive)
|
|
|
|
{
|
|
|
|
proc = proc->lwWaitLink;
|
|
|
|
}
|
|
|
|
}
|
2002-06-11 15:40:53 +02:00
|
|
|
/* proc is now the last PGPROC to be released */
|
2001-09-29 06:02:27 +02:00
|
|
|
lock->head = proc->lwWaitLink;
|
|
|
|
proc->lwWaitLink = NULL;
|
2002-01-07 17:33:00 +01:00
|
|
|
/* prevent additional wakeups until retryer gets to run */
|
|
|
|
lock->releaseOK = false;
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* lock is still held, can't awaken anything */
|
|
|
|
head = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We are done updating shared state of the lock itself. */
|
|
|
|
SpinLockRelease_NoHoldoff(&lock->mutex);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Awaken any waiters I removed from the queue.
|
|
|
|
*/
|
|
|
|
while (head != NULL)
|
|
|
|
{
|
2001-12-29 00:26:04 +01:00
|
|
|
LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
|
2001-09-29 06:02:27 +02:00
|
|
|
proc = head;
|
|
|
|
head = proc->lwWaitLink;
|
|
|
|
proc->lwWaitLink = NULL;
|
|
|
|
proc->lwWaiting = false;
|
2002-05-05 02:03:29 +02:00
|
|
|
PGSemaphoreUnlock(&proc->sem);
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now okay to allow cancel/die interrupts.
|
|
|
|
*/
|
|
|
|
RESUME_INTERRUPTS();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* LWLockReleaseAll - release all currently-held locks
|
|
|
|
*
|
2001-10-25 07:50:21 +02:00
|
|
|
* Used to clean up after elog(ERROR). An important difference between this
|
2001-09-29 06:02:27 +02:00
|
|
|
* function and retail LWLockRelease calls is that InterruptHoldoffCount is
|
|
|
|
* unchanged by this operation. This is necessary since InterruptHoldoffCount
|
2001-10-25 07:50:21 +02:00
|
|
|
* has been set to an appropriate level earlier in error recovery. We could
|
2001-09-29 06:02:27 +02:00
|
|
|
* decrement it below zero if we allow it to drop for each released lock!
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
LWLockReleaseAll(void)
|
|
|
|
{
|
|
|
|
while (num_held_lwlocks > 0)
|
|
|
|
{
|
|
|
|
HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
|
|
|
|
|
2001-10-25 07:50:21 +02:00
|
|
|
LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
}
|