postgresql/src/backend/storage/lmgr/proc.c

/*-------------------------------------------------------------------------
*
* proc.c
* routines to manage per-process shared memory data structure
*
* Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/storage/lmgr/proc.c
*
*-------------------------------------------------------------------------
*/
/*
* Interface (a):
* ProcSleep(), ProcWakeup(),
* ProcQueueAlloc() -- create a shm queue for sleeping processes
 * ProcQueueInit() -- create a queue without allocating memory
*
* Waiting for a lock causes the backend to be put to sleep. Whoever releases
* the lock wakes the process up again (and gives it an error code so it knows
* whether it was awoken on an error condition).
*
* Interface (b):
*
* ProcReleaseLocks -- frees the locks associated with current transaction
*
* ProcKill -- destroys the shared memory state (and locks)
* associated with the process.
*/
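/*
 * For orientation, a simplified sketch of how interface (a) is driven from
 * the lock manager (the real caller is WaitOnLock() in lock.c; this is an
 * illustration, not a copy of that code).  ProcSleep() returns STATUS_OK
 * once the lock has been granted, or STATUS_ERROR if the deadlock checker
 * decided we must give up, in which case DeadLockReport() ereports and
 * never returns:
 *
 *		if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
 *			DeadLockReport();
 */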
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/xact.h"
#include "miscadmin.h"
#include "postmaster/autovacuum.h"
#include "replication/slot.h"
#include "replication/syncrep.h"
#include "storage/ipc.h"
#include "storage/lmgr.h"
#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/procsignal.h"
#include "storage/spin.h"
#include "utils/timeout.h"
#include "utils/timestamp.h"
/* GUC variables */
int DeadlockTimeout = 1000;
int StatementTimeout = 0;
int LockTimeout = 0;
bool log_lock_waits = false;
/* Pointer to this process's PGPROC and PGXACT structs, if any */
PGPROC *MyProc = NULL;
PGXACT *MyPgXact = NULL;
/*
* This spinlock protects the freelist of recycled PGPROC structures.
* We cannot use an LWLock because the LWLock manager depends on already
* having a PGPROC and a wait semaphore! But these structures are touched
* relatively infrequently (only at backend startup or shutdown) and not for
* very long, so a spinlock is okay.
*/
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
/* Pointers to shared-memory structures */
PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
PGPROC *PreparedXactProcs = NULL;
/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;
static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
/* Is a deadlock check pending? */
static volatile sig_atomic_t got_deadlock_timeout;
static void RemoveProcFromArray(int code, Datum arg);
static void ProcKill(int code, Datum arg);
static void AuxiliaryProcKill(int code, Datum arg);
static void CheckDeadLock(void);
/*
* Report shared-memory space needed by InitProcGlobal.
*/
Size
ProcGlobalShmemSize(void)
{
Size size = 0;
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
/* MyProcs, including autovacuum workers and launcher */
size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
/* AuxiliaryProcs */
size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
/* Prepared xacts */
size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGPROC)));
/* ProcStructLock */
size = add_size(size, sizeof(slock_t));
size = add_size(size, mul_size(MaxBackends, sizeof(PGXACT)));
size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGXACT)));
size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGXACT)));
return size;
}
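/*
 * The add_size()/mul_size() calls above are overflow-checked counterparts
 * of + and * on Size, so a wildly misconfigured MaxBackends fails with a
 * clean error instead of silently wrapping around.  A minimal sketch of
 * the idiom, assuming Size is unsigned (the real helpers live in shmem.c):
 *
 *		Size
 *		add_size(Size s1, Size s2)
 *		{
 *			Size		result = s1 + s2;
 *
 *			if (result < s1 || result < s2)
 *				ereport(ERROR,
 *						(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
 *						 errmsg("requested shared memory size overflows size_t")));
 *			return result;
 *		}
 */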
/*
* Report number of semaphores needed by InitProcGlobal.
*/
int
ProcGlobalSemas(void)
{
/*
* We need a sema per backend (including autovacuum), plus one for each
* auxiliary process.
*/
return MaxBackends + NUM_AUXILIARY_PROCS;
}
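/*
 * Worked example, assuming MaxBackends is computed as in
 * InitializeMaxBackends() (max_connections + autovacuum_max_workers + 1 +
 * max_worker_processes) and the stock defaults of 100, 3, and 8:
 *
 *		MaxBackends = 100 + 3 + 1 + 8 = 112
 *		semaphores needed = 112 + NUM_AUXILIARY_PROCS
 *
 * so a freshly initialized cluster reserves over a hundred semaphores up
 * front --- which is exactly what trips the undersized kernel limits
 * mentioned in the comment below.
 */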
/*
* InitProcGlobal -
* Initialize the global process table during postmaster or standalone
* backend startup.
*
* We also create all the per-process semaphores we will need to support
* the requested number of backends. We used to allocate semaphores
* only when backends were actually started up, but that is bad because
* it lets Postgres fail under load --- a lot of Unix systems are
* (mis)configured with small limits on the number of semaphores, and
* running out when trying to start another backend is a common failure.
* So, now we grab enough semaphores to support the desired max number
* of backends immediately at initialization --- if the sysadmin has set
* MaxConnections, max_worker_processes, or autovacuum_max_workers higher
* than his kernel will support, he'll find out sooner rather than later.
*
* Another reason for creating semaphores here is that the semaphore
* implementation typically requires us to create semaphores in the
* postmaster, not in backends.
*
* Note: this is NOT called by individual backends under a postmaster,
* not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
* pointers must be propagated specially for EXEC_BACKEND operation.
*/
void
InitProcGlobal(void)
{
PGPROC *procs;
PGXACT *pgxacts;
int i,
j;
bool found;
uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
/* Create the ProcGlobal shared structure */
ProcGlobal = (PROC_HDR *)
ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
Assert(!found);
/*
* Initialize the data structures.
*/
ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
ProcGlobal->freeProcs = NULL;
ProcGlobal->autovacFreeProcs = NULL;
ProcGlobal->bgworkerFreeProcs = NULL;
ProcGlobal->startupProc = NULL;
ProcGlobal->startupProcPid = 0;
ProcGlobal->startupBufferPinWaitBufId = -1;
ProcGlobal->walwriterLatch = NULL;
ProcGlobal->checkpointerLatch = NULL;
pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO);
/*
* Create and initialize all the PGPROC structures we'll need. There are
* five separate consumers: (1) normal backends, (2) autovacuum workers
* and the autovacuum launcher, (3) background workers, (4) auxiliary
* processes, and (5) prepared transactions. Each PGPROC structure is
* dedicated to exactly one of these purposes, and they do not move
* between groups.
*/
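/*
 * For illustration, the index layout of the single allProcs array; the
 * boundaries follow from TotalProcs above and the branches in the loop
 * below:
 *
 *		[0, MaxConnections)			normal backends
 *		[MaxConnections, MaxConnections + autovacuum_max_workers + 1)
 *									autovacuum launcher and workers
 *		[..., MaxBackends)			background workers
 *		[MaxBackends, MaxBackends + NUM_AUXILIARY_PROCS)
 *									auxiliary processes
 *		[MaxBackends + NUM_AUXILIARY_PROCS, TotalProcs)
 *									prepared transactions
 */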
procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
ProcGlobal->allProcs = procs;
/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
if (!procs)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory")));
MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
/*
* Also allocate a separate array of PGXACT structures. This is separate
* from the main PGPROC array so that the most heavily accessed data is
* stored contiguously in memory in as few cache lines as possible. This
* provides significant performance benefits, especially on a
* multiprocessor system. There is one PGXACT structure for every PGPROC
* structure.
*/
pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
ProcGlobal->allPgXact = pgxacts;
for (i = 0; i < TotalProcs; i++)
{
/* Common initialization for all PGPROCs, regardless of type. */
/*
* Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
* dummy PGPROCs don't need these though - they're never associated
* with a real process
*/
if (i < MaxBackends + NUM_AUXILIARY_PROCS)
{
PGSemaphoreCreate(&(procs[i].sem));
InitSharedLatch(&(procs[i].procLatch));
LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
}
procs[i].pgprocno = i;
/*
* Newly created PGPROCs for normal backends, autovacuum and bgworkers
* must be queued up on the appropriate free list. Because there can
* only ever be a small, fixed number of auxiliary processes, no free
* list is used in that case; InitAuxiliaryProcess() instead uses a
* linear search. PGPROCs for prepared transactions are added to a
* free list by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
{
/* PGPROC for normal backend, add to freeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
ProcGlobal->freeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->freeProcs;
}
else if (i < MaxConnections + autovacuum_max_workers + 1)
{
/* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
ProcGlobal->autovacFreeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
}
else if (i < MaxBackends)
{
/* PGPROC for bgworker, add to bgworkerFreeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
ProcGlobal->bgworkerFreeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
}
/* Initialize myProcLocks[] shared memory queues. */
for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
SHMQueueInit(&(procs[i].myProcLocks[j]));
/* Initialize lockGroupMembers list. */
dlist_init(&procs[i].lockGroupMembers);
}
/*
* Save pointers to the blocks of PGPROC structures reserved for auxiliary
* processes and prepared transactions.
*/
AuxiliaryProcs = &procs[MaxBackends];
PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
/* Create ProcStructLock spinlock, too */
ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
SpinLockInit(ProcStructLock);
}
/*
* InitProcess -- initialize a per-process data structure for this backend
*/
void
InitProcess(void)
{
PGPROC * volatile * procgloballist;
/*
* ProcGlobal should be set up already (if we are a backend, we inherit
* this by fork() or EXEC_BACKEND mechanism from the postmaster).
*/
if (ProcGlobal == NULL)
elog(PANIC, "proc header uninitialized");
if (MyProc != NULL)
elog(ERROR, "you already exist");
/* Decide which list should supply our PGPROC. */
if (IsAnyAutoVacuumProcess())
procgloballist = &ProcGlobal->autovacFreeProcs;
else if (IsBackgroundWorker)
procgloballist = &ProcGlobal->bgworkerFreeProcs;
else
procgloballist = &ProcGlobal->freeProcs;
/*
* Try to get a proc struct from the appropriate free list. If this
* fails, we must be out of PGPROC structures (not to mention semaphores).
*
* While we are holding the ProcStructLock, also copy the current shared
* estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
set_spins_per_delay(ProcGlobal->spins_per_delay);
MyProc = *procgloballist;
if (MyProc != NULL)
{
*procgloballist = (PGPROC *) MyProc->links.next;
SpinLockRelease(ProcStructLock);
}
else
{
/*
* If we reach here, all the PGPROCs are in use. This is one of the
* possible places to detect "too many backends", so give the standard
* error message. XXX do we need to give a different failure message
* in the autovacuum case?
*/
SpinLockRelease(ProcStructLock);
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("sorry, too many clients already")));
}
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
/*
* Cross-check that the PGPROC is of the type we expect; if this were
* not the case, it would get returned to the wrong list.
*/
Assert(MyProc->procgloballist == procgloballist);
/*
* Now that we have a PGPROC, mark ourselves as an active postmaster
* child; this is so that the postmaster can detect it if we exit without
* cleaning up. (XXX autovac launcher currently doesn't participate in
* this; it probably should.)
*/
if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
MarkPostmasterChildActive();
/*
* Initialize all fields of MyProc, except for those previously
* initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
MyProc->lxid = InvalidLocalTransactionId;
MyProc->fpVXIDLock = false;
MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
MyPgXact->xid = InvalidTransactionId;
MyPgXact->xmin = InvalidTransactionId;
MyProc->pid = MyProcPid;
/* backendId, databaseId and roleId will be filled in later */
MyProc->backendId = InvalidBackendId;
MyProc->databaseId = InvalidOid;
MyProc->roleId = InvalidOid;
MyPgXact->delayChkpt = false;
MyPgXact->vacuumFlags = 0;
/* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
if (IsAutoVacuumWorkerProcess())
MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
MyProc->lwWaiting = false;
MyProc->lwWaitMode = 0;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
{
int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
}
#endif
MyProc->recoveryConflictPending = false;
/* Initialize fields for sync rep */
MyProc->waitLSN = 0;
MyProc->syncRepState = SYNC_REP_NOT_WAITING;
SHMQueueElemInit(&(MyProc->syncRepLinks));
/* Initialize fields for group XID clearing. */
MyProc->procArrayGroupMember = false;
MyProc->procArrayGroupMemberXid = InvalidTransactionId;
pg_atomic_init_u32(&MyProc->procArrayGroupNext, INVALID_PGPROCNO);
/* Check that group locking fields are in a proper initial state. */
Assert(MyProc->lockGroupLeader == NULL);
Assert(dlist_is_empty(&MyProc->lockGroupMembers));
/* Initialize wait event information. */
MyProc->wait_event_info = 0;
/*
* Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
 * on it.  That allows us to repoint the process latch, which so far
 * points to the process-local one, to the shared one.
*/
OwnLatch(&MyProc->procLatch);
SwitchToSharedLatch();
/*
* We might be reusing a semaphore that belonged to a failed process. So
* be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
/*
* Arrange to clean up at backend exit.
*/
on_shmem_exit(ProcKill, 0);
/*
* Now that we have a PGPROC, we could try to acquire locks, so initialize
* local state needed for LWLocks, and the deadlock checker.
*/
InitLWLockAccess();
InitDeadLockChecking();
}
/*
* InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
*
* This is separate from InitProcess because we can't acquire LWLocks until
* we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
* work until after we've done CreateSharedMemoryAndSemaphores.
*/
void
InitProcessPhase2(void)
{
Assert(MyProc != NULL);
/*
* Add our PGPROC to the PGPROC array in shared memory.
*/
ProcArrayAdd(MyProc);
/*
* Arrange to clean that up at backend exit.
*/
on_shmem_exit(RemoveProcFromArray, 0);
}
/*
* InitAuxiliaryProcess -- create a per-auxiliary-process data structure
*
* This is called by bgwriter and similar processes so that they will have a
* MyProc value that's real enough to let them wait for LWLocks. The PGPROC
* and sema that are assigned are one of the extra ones created during
* InitProcGlobal.
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
* to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*
 * The Startup process, however, uses locks but never waits for them in the
 * normal backend sense.  The Startup process also takes part in sinval
 * messaging as a sendOnly process, so it never reads messages from the
 * sinval queue.  Thus the Startup process does have a VXID and does show
 * up in pg_locks.
*/
void
InitAuxiliaryProcess(void)
{
PGPROC *auxproc;
int proctype;
/*
* ProcGlobal should be set up already (if we are a backend, we inherit
* this by fork() or EXEC_BACKEND mechanism from the postmaster).
*/
if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
elog(PANIC, "proc header uninitialized");
if (MyProc != NULL)
elog(ERROR, "you already exist");
/*
* We use the ProcStructLock to protect assignment and releasing of
* AuxiliaryProcs entries.
*
* While we are holding the ProcStructLock, also copy the current shared
* estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
set_spins_per_delay(ProcGlobal->spins_per_delay);
/*
* Find a free auxproc ... *big* trouble if there isn't one ...
*/
for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
{
auxproc = &AuxiliaryProcs[proctype];
if (auxproc->pid == 0)
break;
}
if (proctype >= NUM_AUXILIARY_PROCS)
{
SpinLockRelease(ProcStructLock);
elog(FATAL, "all AuxiliaryProcs are in use");
}
/* Mark auxiliary proc as in use by me */
/* use volatile pointer to prevent code rearrangement */
((volatile PGPROC *) auxproc)->pid = MyProcPid;
MyProc = auxproc;
MyPgXact = &ProcGlobal->allPgXact[auxproc->pgprocno];
SpinLockRelease(ProcStructLock);
/*
* Initialize all fields of MyProc, except for those previously
* initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
MyProc->lxid = InvalidLocalTransactionId;
MyProc->fpVXIDLock = false;
MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
MyPgXact->xid = InvalidTransactionId;
MyPgXact->xmin = InvalidTransactionId;
MyProc->backendId = InvalidBackendId;
MyProc->databaseId = InvalidOid;
MyProc->roleId = InvalidOid;
MyPgXact->delayChkpt = false;
MyPgXact->vacuumFlags = 0;
MyProc->lwWaiting = false;
MyProc->lwWaitMode = 0;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
{
int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
}
#endif
/*
* Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
 * on it.  That allows us to repoint the process latch, which so far
 * points to the process-local one, to the shared one.
*/
OwnLatch(&MyProc->procLatch);
SwitchToSharedLatch();
/* Check that group locking fields are in a proper initial state. */
Assert(MyProc->lockGroupLeader == NULL);
Assert(dlist_is_empty(&MyProc->lockGroupMembers));
/*
* We might be reusing a semaphore that belonged to a failed process. So
* be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
/*
* Arrange to clean up at process exit.
*/
on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
}
/*
* Record the PID and PGPROC structures for the Startup process, for use in
* ProcSendSignal(). See comments there for further explanation.
*/
void
PublishStartupProcessInformation(void)
{
SpinLockAcquire(ProcStructLock);
ProcGlobal->startupProc = MyProc;
ProcGlobal->startupProcPid = MyProcPid;
SpinLockRelease(ProcStructLock);
}
/*
 * Used from bufmgr to share the value of the buffer that Startup waits on,
 * or to reset the value to "not waiting" (-1).  This allows processing
 * of recovery conflicts for buffer pins.  The value is set before backends
 * look at it, so no locking is required, especially since storing an int
 * is atomic.
 */
void
SetStartupBufferPinWaitBufId(int bufid)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
procglobal->startupBufferPinWaitBufId = bufid;
}
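/*
 * Typical use, as a sketch of the Startup-process side (this resembles the
 * buffer-pin conflict path in bufmgr.c's LockBufferForCleanup(); shown for
 * illustration only):
 *
 *		SetStartupBufferPinWaitBufId(buffer - 1);	(publish the bufid)
 *		ResolveRecoveryConflictWithBufferPin();		(wait; cancel if needed)
 *		SetStartupBufferPinWaitBufId(-1);			(reset to "not waiting")
 */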
/*
* Used by backends when they receive a request to check for buffer pin waits.
*/
int
GetStartupBufferPinWaitBufId(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
return procglobal->startupBufferPinWaitBufId;
}
/*
* Check whether there are at least N free PGPROC objects.
*
* Note: this is designed on the assumption that N will generally be small.
*/
bool
HaveNFreeProcs(int n)
{
PGPROC *proc;
SpinLockAcquire(ProcStructLock);
proc = ProcGlobal->freeProcs;
while (n > 0 && proc != NULL)
{
proc = (PGPROC *) proc->links.next;
n--;
}
SpinLockRelease(ProcStructLock);
return (n <= 0);
}
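/*
 * Typical use: a sketch resembling the reserved-slots check made during
 * backend startup (illustrative, not a verbatim copy):
 *
 *		if (!am_superuser &&
 *			ReservedBackends > 0 &&
 *			!HaveNFreeProcs(ReservedBackends))
 *			ereport(FATAL,
 *					(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
 *					 errmsg("remaining connection slots are reserved for "
 *							"non-replication superuser connections")));
 */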
/*
* Check if the current process is awaiting a lock.
*/
bool
IsWaitingForLock(void)
{
	return (lockAwaited != NULL);
}
/*
* Cancel any pending wait for lock, when aborting a transaction, and revert
* any strong lock count acquisition for a lock being acquired.
*
* (Normally, this would only happen if we accept a cancel/die
* interrupt while waiting; but an ereport(ERROR) before or during the lock
* wait is within the realm of possibility, too.)
*/
void
LockErrorCleanup(void)
{
LWLock *partitionLock;
DisableTimeoutParams timeouts[2];
HOLD_INTERRUPTS();
AbortStrongLockAcquire();
/* Nothing to do if we weren't waiting for a lock */
if (lockAwaited == NULL)
{
RESUME_INTERRUPTS();
return;
}
/*
* Turn off the deadlock and lock timeout timers, if they are still
* running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
* indicator flag, since this function is executed before
* ProcessInterrupts when responding to SIGINT; else we'd lose the
* knowledge that the SIGINT came from a lock timeout and not an external
* source.
*/
timeouts[0].id = DEADLOCK_TIMEOUT;
timeouts[0].keep_indicator = false;
timeouts[1].id = LOCK_TIMEOUT;
timeouts[1].keep_indicator = true;
disable_timeouts(timeouts, 2);
/* Unlink myself from the wait queue, if on it (might not be anymore!) */
partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
if (MyProc->links.next != NULL)
{
/* We could not have been granted the lock yet */
RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
}
else
{
/*
* Somebody kicked us off the lock queue already. Perhaps they
* granted us the lock, or perhaps they detected a deadlock. If they
* did grant us the lock, we'd better remember it in our local lock
* table.
*/
if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
}
lockAwaited = NULL;
LWLockRelease(partitionLock);
RESUME_INTERRUPTS();
}
/*
* ProcReleaseLocks() -- release locks associated with current transaction
* at main transaction commit or abort
*
* At main transaction commit, we release standard locks except session locks.
* At main transaction abort, we release all locks including session locks.
*
* Advisory locks are released only if they are transaction-level;
* session-level holds remain, whether this is a commit or not.
*
* At subtransaction commit, we don't release any locks (so this func is not
* needed at all); we will defer the releasing to the parent transaction.
* At subtransaction abort, we release all locks held by the subtransaction;
* this is implemented by retail releasing of the locks under control of
* the ResourceOwner mechanism.
*/
void
ProcReleaseLocks(bool isCommit)
{
if (!MyProc)
return;
/* If waiting, get off wait queue (should only be needed after error) */
LockErrorCleanup();
/* Release standard locks, including session-level if aborting */
LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
/* Release transaction-level advisory locks */
LockReleaseAll(USER_LOCKMETHOD, false);
}
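/*
* Illustrative only (a sketch, not verbatim source): ProcReleaseLocks is
* reached from CommitTransaction()/AbortTransaction() in xact.c via the
* ResourceOwner release machinery, roughly:
*
*		ProcReleaseLocks(true);		-- top-level commit; session locks kept
*		ProcReleaseLocks(false);	-- top-level abort; everything released
*/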
/*
* RemoveProcFromArray() -- Remove this process from the shared ProcArray.
*/
static void
RemoveProcFromArray(int code, Datum arg)
{
Assert(MyProc != NULL);
ProcArrayRemove(MyProc, InvalidTransactionId);
}
/*
* ProcKill() -- Destroy the per-proc data structure for
* this process. Release any of its held LW locks.
*/
static void
ProcKill(int code, Datum arg)
{
PGPROC *proc;
PGPROC * volatile * procgloballist;
Assert(MyProc != NULL);
/* Make sure we're out of the sync rep lists */
SyncRepCleanupAtProcExit();
#ifdef USE_ASSERT_CHECKING
{
int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
}
#endif
/*
* Release any LW locks I am holding. There really shouldn't be any, but
* it's cheap to check again before we cut the knees off the LWLock
* facility by releasing our PGPROC ...
*/
LWLockReleaseAll();
/* Make sure active replication slots are released */
if (MyReplicationSlot != NULL)
ReplicationSlotRelease();
/*
* Detach from any lock group of which we are a member. If the leader
* exits before all other group members, its PGPROC will remain allocated
* until the last group process exits; that process must return the
* leader's PGPROC to the appropriate list.
*/
if (MyProc->lockGroupLeader != NULL)
{
PGPROC *leader = MyProc->lockGroupLeader;
LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
Assert(!dlist_is_empty(&leader->lockGroupMembers));
dlist_delete(&MyProc->lockGroupLink);
if (dlist_is_empty(&leader->lockGroupMembers))
{
leader->lockGroupLeader = NULL;
if (leader != MyProc)
{
procgloballist = leader->procgloballist;
/* Leader exited first; return its PGPROC. */
SpinLockAcquire(ProcStructLock);
leader->links.next = (SHM_QUEUE *) *procgloballist;
*procgloballist = leader;
SpinLockRelease(ProcStructLock);
}
}
else if (leader != MyProc)
MyProc->lockGroupLeader = NULL;
LWLockRelease(leader_lwlock);
}
/*
* Reset MyLatch to the process local one. This is so that signal
* handlers et al can continue using the latch after the shared latch
* isn't ours anymore. After that clear MyProc and disown the shared
* latch.
*/
SwitchBackToLocalLatch();
proc = MyProc;
MyProc = NULL;
DisownLatch(&proc->procLatch);
procgloballist = proc->procgloballist;
SpinLockAcquire(ProcStructLock);
/*
* If we're still a member of a locking group, that means we're a leader
* which has somehow exited before its children. The last remaining child
* will release our PGPROC. Otherwise, release it now.
*/
if (proc->lockGroupLeader == NULL)
{
/* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
Assert(dlist_is_empty(&proc->lockGroupMembers));
/* Return PGPROC structure (and semaphore) to appropriate freelist */
proc->links.next = (SHM_QUEUE *) *procgloballist;
*procgloballist = proc;
}
/* Update shared estimate of spins_per_delay */
ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
SpinLockRelease(ProcStructLock);
/*
* This process is no longer present in shared memory in any meaningful
* way, so tell the postmaster we've cleaned up acceptably well. (XXX
* autovac launcher should be included here someday)
*/
if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
MarkPostmasterChildInactive();
/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
if (AutovacuumLauncherPid != 0)
kill(AutovacuumLauncherPid, SIGUSR2);
}
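/*
* Illustrative only: ProcKill is not called directly. InitProcess arranges
* for it to run at process exit, along the lines of
*
*		on_shmem_exit(ProcKill, 0);
*
* Because on_shmem_exit callbacks run in reverse order of registration and
* this one is registered early, it fires after most other shared-memory
* cleanup hooks.
*/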
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
* processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
AuxiliaryProcKill(int code, Datum arg)
{
int proctype = DatumGetInt32(arg);
PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
PGPROC *proc;
Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
auxproc = &AuxiliaryProcs[proctype];
Assert(MyProc == auxproc);
/* Release any LW locks I am holding (see notes above) */
LWLockReleaseAll();
/*
* Reset MyLatch to the process local one. This is so that signal
* handlers et al can continue using the latch after the shared latch
* isn't ours anymore. After that clear MyProc and disown the shared
* latch.
*/
SwitchBackToLocalLatch();
proc = MyProc;
MyProc = NULL;
DisownLatch(&proc->procLatch);
SpinLockAcquire(ProcStructLock);
/* Mark auxiliary proc no longer in use */
proc->pid = 0;
/* Update shared estimate of spins_per_delay */
ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
SpinLockRelease(ProcStructLock);
}
/*
* ProcQueue package: routines for putting processes to sleep
* and waking them up
*/
/*
* ProcQueueAlloc -- alloc/attach to a shared memory process queue
*
* Returns: a pointer to the queue
* Side Effects: Initializes the queue if it wasn't there before
*/
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(const char *name)
{
PROC_QUEUE *queue;
bool found;
queue = (PROC_QUEUE *)
ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
if (!found)
ProcQueueInit(queue);
return queue;
}
#endif
/*
* ProcQueueInit -- initialize a shared memory process queue
*/
void
ProcQueueInit(PROC_QUEUE *queue)
{
SHMQueueInit(&(queue->links));
queue->size = 0;
}
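/*
* Illustrative only: a typical caller is lock.c, which initializes the
* wait queue of a newly created LOCK with
*
*		ProcQueueInit(&(lock->waitProcs));
*/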
/*
* ProcSleep -- put a process to sleep on the specified lock
*
* Caller must have set MyProc->heldLocks to reflect locks already held
* on the lockable object by this process (under all XIDs).
*
* The lock table's partition lock must be held at entry, and will be held
* at exit.
*
* Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
*
* ASSUME: that no one will fiddle with the queue until after
* we release the partition lock.
*
* NOTES: The process queue is now a priority queue for locking.
*/
int
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
LOCKMODE lockmode = locallock->tag.mode;
LOCK *lock = locallock->lock;
PROCLOCK *proclock = locallock->proclock;
uint32 hashcode = locallock->hashcode;
LWLock *partitionLock = LockHashPartitionLock(hashcode);
PROC_QUEUE *waitQueue = &(lock->waitProcs);
LOCKMASK myHeldLocks = MyProc->heldLocks;
bool early_deadlock = false;
bool allow_autovacuum_cancel = true;
int myWaitStatus;
PGPROC *proc;
PGPROC *leader = MyProc->lockGroupLeader;
int i;
/*
* If group locking is in use, locks held by members of my locking group
* need to be included in myHeldLocks.
*/
if (leader != NULL)
{
SHM_QUEUE *procLocks = &(lock->procLocks);
PROCLOCK *otherproclock;
otherproclock = (PROCLOCK *)
SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
while (otherproclock != NULL)
{
if (otherproclock->groupLeader == leader)
myHeldLocks |= otherproclock->holdMask;
otherproclock = (PROCLOCK *)
SHMQueueNext(procLocks, &otherproclock->lockLink,
offsetof(PROCLOCK, lockLink));
}
}
/*
* Determine where to add myself in the wait queue.
*
* Normally I should go at the end of the queue. However, if I already
* hold locks that conflict with the request of any previous waiter, put
* myself in the queue just in front of the first such waiter. This is not
* a necessary step, since deadlock detection would move me to before that
* waiter anyway; but it's relatively cheap to detect such a conflict
* immediately, and avoid delaying till deadlock timeout.
*
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
* waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
*/
if (myHeldLocks != 0)
{
LOCKMASK aheadRequests = 0;
proc = (PGPROC *) waitQueue->links.next;
for (i = 0; i < waitQueue->size; i++)
{
/*
* If we're part of the same locking group as this waiter, its
* locks neither conflict with ours nor contribute to
* aheadRequests.
*/
if (leader != NULL && leader == proc->lockGroupLeader)
{
proc = (PGPROC *) proc->links.next;
continue;
}
/* Must he wait for me? */
if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
{
/* Must I wait for him? */
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
* Yes, so we have a deadlock. Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we
* can't do that until we are *on* the wait queue. So, set
* a flag to check below, and break out of loop. Also,
* record deadlock info for later message.
*/
RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
early_deadlock = true;
break;
}
/* I must go before this waiter. Check special case. */
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
LockCheckConflicts(lockMethodTable,
lockmode,
lock,
proclock) == STATUS_OK)
{
/* Skip the wait and just grant myself the lock. */
GrantLock(lock, proclock, lockmode);
GrantAwaitedLock();
return STATUS_OK;
}
/* Break out of loop to put myself before him */
break;
}
/* Nope, so advance to next waiter */
aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
proc = (PGPROC *) proc->links.next;
}
/*
* If we fall out of loop normally, proc points to waitQueue head, so
* we will insert at tail of queue as desired.
*/
}
else
{
/* I hold no locks, so I can't push in front of anyone. */
proc = (PGPROC *) &(waitQueue->links);
}
/*
* Insert self into queue, ahead of the given proc (or at tail of queue).
*/
SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
waitQueue->size++;
lock->waitMask |= LOCKBIT_ON(lockmode);
/* Set up wait information in PGPROC object, too */
MyProc->waitLock = lock;
MyProc->waitProcLock = proclock;
MyProc->waitLockMode = lockmode;
MyProc->waitStatus = STATUS_WAITING;
/*
* If we detected deadlock, give up without waiting. This must agree with
* CheckDeadLock's recovery code, except that we shouldn't release the
* semaphore since we haven't tried to lock it yet.
*/
if (early_deadlock)
{
RemoveFromWaitQueue(MyProc, hashcode);
return STATUS_ERROR;
}
/* mark that we are waiting for a lock */
lockAwaited = locallock;
/*
* Release the lock table's partition lock.
*
* NOTE: this may also cause us to exit critical-section state, possibly
* allowing a cancel/die interrupt to be accepted. This is OK because we
* have recorded the fact that we are waiting for a lock, and so
* LockErrorCleanup will clean up if cancel/die happens.
*/
LWLockRelease(partitionLock);
/*
* Also, now that we will successfully clean up after an ereport, it's
* safe to check to see if there's a buffer pin deadlock against the
* Startup process. Of course, that's only necessary if we're doing Hot
* Standby and are not the Startup process ourselves.
*/
if (RecoveryInProgress() && !InRecovery)
CheckRecoveryConflictDeadlock();
/* Reset deadlock_state before enabling the timeout handler */
deadlock_state = DS_NOT_YET_CHECKED;
got_deadlock_timeout = false;
/*
* Set timer so we can wake up after a while and check for a deadlock. If a
* deadlock is detected, the handler releases the process's semaphore and
* sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
* must report failure rather than success.
*
* By delaying the check until we've waited for a bit, we can avoid
* running the rather expensive deadlock-check code in most cases.
*
* If LockTimeout is set, also enable the timeout for that. We can save a
* few cycles by enabling both timeout sources in one call.
*/
if (LockTimeout > 0)
{
EnableTimeoutParams timeouts[2];
timeouts[0].id = DEADLOCK_TIMEOUT;
timeouts[0].type = TMPARAM_AFTER;
timeouts[0].delay_ms = DeadlockTimeout;
timeouts[1].id = LOCK_TIMEOUT;
timeouts[1].type = TMPARAM_AFTER;
timeouts[1].delay_ms = LockTimeout;
enable_timeouts(timeouts, 2);
}
else
enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
/*
* If somebody wakes us between LWLockRelease and WaitLatch, the latch is
* already set and WaitLatch will return immediately. But a set latch does
* not necessarily mean that the lock is free now: there are many other
* sources of latch sets besides somebody releasing the lock.
*
* We process interrupts whenever the latch has been set, so cancel/die
* interrupts are processed quickly. This means we must not mind losing
* control to a cancel/die interrupt here. We don't, because we have no
* shared-state-change work to do after being granted the lock (the
* grantor did it all). We do have to worry about canceling the deadlock
* timeout and updating the locallock table, but if we lose control to an
* error, LockErrorCleanup will fix that up.
*/
do
{
WaitLatch(MyLatch, WL_LATCH_SET, 0);
ResetLatch(MyLatch);
/* check for deadlocks first, as that's probably log-worthy */
if (got_deadlock_timeout)
{
CheckDeadLock();
got_deadlock_timeout = false;
}
CHECK_FOR_INTERRUPTS();
/*
* waitStatus could change from STATUS_WAITING to something else
* asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = *((volatile int *) &MyProc->waitStatus);
/*
* If we are not deadlocked, but are waiting on an autovacuum-induced
* task, send a signal to interrupt it.
*/
if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
{
PGPROC *autovac = GetBlockingAutoVacuumPgproc();
PGXACT *autovac_pgxact = &ProcGlobal->allPgXact[autovac->pgprocno];
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
/*
* Only do it if the worker is not working to protect against Xid
* wraparound.
*/
if ((autovac_pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) &&
!(autovac_pgxact->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
{
int pid = autovac->pid;
StringInfoData locktagbuf;
StringInfoData logbuf; /* errdetail for server log */
initStringInfo(&locktagbuf);
initStringInfo(&logbuf);
DescribeLockTag(&locktagbuf, &lock->tag);
appendStringInfo(&logbuf,
_("Process %d waits for %s on %s."),
MyProcPid,
GetLockmodeName(lock->tag.locktag_lockmethodid,
lockmode),
locktagbuf.data);
/* release lock as quickly as possible */
LWLockRelease(ProcArrayLock);
/* send the autovacuum worker Back to Old Kent Road */
ereport(DEBUG1,
(errmsg("sending cancel to blocking autovacuum PID %d",
pid),
errdetail_log("%s", logbuf.data)));
if (kill(pid, SIGINT) < 0)
{
/*
* There's a race condition here: once we release the
* ProcArrayLock, it's possible for the autovac worker to
* close up shop and exit before we can do the kill().
* Therefore, we do not whinge about no-such-process.
* Other errors such as EPERM could conceivably happen if
* the kernel recycles the PID fast enough, but such cases
* seem improbable enough that it's probably best to issue
* a warning if we see some other errno.
*/
if (errno != ESRCH)
ereport(WARNING,
(errmsg("could not send signal to process %d: %m",
pid)));
}
pfree(logbuf.data);
pfree(locktagbuf.data);
}
else
LWLockRelease(ProcArrayLock);
/* prevent signal from being resent more than once */
allow_autovacuum_cancel = false;
}
/*
* If awoken after the deadlock check interrupt has run, and
* log_lock_waits is on, then report about the wait.
*/
if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
{
StringInfoData buf,
lock_waiters_sbuf,
lock_holders_sbuf;
const char *modename;
long secs;
int usecs;
long msecs;
SHM_QUEUE *procLocks;
PROCLOCK *proclock;
bool first_holder = true,
first_waiter = true;
int lockHoldersNum = 0;
initStringInfo(&buf);
initStringInfo(&lock_waiters_sbuf);
initStringInfo(&lock_holders_sbuf);
DescribeLockTag(&buf, &locallock->tag.lock);
modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
lockmode);
TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
GetCurrentTimestamp(),
&secs, &usecs);
msecs = secs * 1000 + usecs / 1000;
usecs = usecs % 1000;
/*
* we loop over the lock's procLocks to gather a list of all
* holders and waiters. Thus we will be able to provide more
* detailed information for lock debugging purposes.
*
* lock->procLocks contains all processes which hold or wait for
* this lock.
*/
LWLockAcquire(partitionLock, LW_SHARED);
procLocks = &(lock->procLocks);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
offsetof(PROCLOCK, lockLink));
while (proclock)
{
/*
* we are a waiter if myProc->waitProcLock == proclock; we are
* a holder if it is NULL or something different
*/
if (proclock->tag.myProc->waitProcLock == proclock)
{
if (first_waiter)
{
appendStringInfo(&lock_waiters_sbuf, "%d",
proclock->tag.myProc->pid);
first_waiter = false;
}
else
appendStringInfo(&lock_waiters_sbuf, ", %d",
proclock->tag.myProc->pid);
}
else
{
if (first_holder)
{
appendStringInfo(&lock_holders_sbuf, "%d",
proclock->tag.myProc->pid);
first_holder = false;
}
else
appendStringInfo(&lock_holders_sbuf, ", %d",
proclock->tag.myProc->pid);
lockHoldersNum++;
}
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
offsetof(PROCLOCK, lockLink));
}
LWLockRelease(partitionLock);
if (deadlock_state == DS_SOFT_DEADLOCK)
ereport(LOG,
(errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
else if (deadlock_state == DS_HARD_DEADLOCK)
{
/*
* This message is a bit redundant with the error that will be
* reported subsequently, but in some cases the error report
* might not make it to the log (eg, if it's caught by an
* exception handler), and we want to ensure all long-wait
* events get logged.
*/
ereport(LOG,
(errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
}
if (myWaitStatus == STATUS_WAITING)
ereport(LOG,
(errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
else if (myWaitStatus == STATUS_OK)
ereport(LOG,
(errmsg("process %d acquired %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs)));
else
{
Assert(myWaitStatus == STATUS_ERROR);
/*
* Currently, the deadlock checker always kicks its own
* process, which means that we'll only see STATUS_ERROR when
* deadlock_state == DS_HARD_DEADLOCK, and there's no need to
* print redundant messages. But for completeness and
* future-proofing, print a message if it looks like someone
* else kicked us off the lock.
*/
if (deadlock_state != DS_HARD_DEADLOCK)
ereport(LOG,
(errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
MyProcPid, modename, buf.data, msecs, usecs),
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
"Processes holding the lock: %s. Wait queue: %s.",
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
}
/*
* At this point we might still need to wait for the lock. Reset
* state so we don't print the above messages again.
*/
deadlock_state = DS_NO_DEADLOCK;
pfree(buf.data);
pfree(lock_holders_sbuf.data);
pfree(lock_waiters_sbuf.data);
}
} while (myWaitStatus == STATUS_WAITING);
/*
* Disable the timers, if they are still running. As in LockErrorCleanup,
* we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
* already caused QueryCancelPending to become set, we want the cancel to
* be reported as a lock timeout, not a user cancel.
*/
if (LockTimeout > 0)
{
DisableTimeoutParams timeouts[2];
timeouts[0].id = DEADLOCK_TIMEOUT;
timeouts[0].keep_indicator = false;
timeouts[1].id = LOCK_TIMEOUT;
timeouts[1].keep_indicator = true;
disable_timeouts(timeouts, 2);
}
else
disable_timeout(DEADLOCK_TIMEOUT, false);
/*
* Re-acquire the lock table's partition lock. We have to do this to hold
* off cancel/die interrupts before we can mess with lockAwaited (else we
* might have a missed or duplicated locallock update).
*/
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
/*
* We no longer want LockErrorCleanup to do anything.
*/
lockAwaited = NULL;
/*
* If we got the lock, be sure to remember it in the locallock table.
*/
if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
/*
* We don't have to do anything else, because the awaker did all the
* necessary update of the lock table and MyProc.
*/
return MyProc->waitStatus;
}
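/*
* Illustrative only (a sketch of the caller contract, not verbatim
* source): WaitOnLock() in lock.c calls this with the partition lock held
* and treats a non-OK result as a hard deadlock:
*
*		if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
*			DeadLockReport();	-- ereports; does not return
*/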
/*
* ProcWakeup -- wake up a process by releasing its private semaphore.
*
* Also remove the process from the wait queue and set its links invalid.
* RETURN: the next process in the wait queue.
*
* The appropriate lock partition lock must be held by caller.
*
* XXX: presently, this code is only used for the "success" case, and only
* works correctly for that case. To clean up in failure case, would need
* to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
* Hence, in practice the waitStatus parameter must be STATUS_OK.
*/
PGPROC *
ProcWakeup(PGPROC *proc, int waitStatus)
{
PGPROC *retProc;
/* Proc should be sleeping ... */
if (proc->links.prev == NULL ||
proc->links.next == NULL)
return NULL;
Assert(proc->waitStatus == STATUS_WAITING);
/* Save next process before we zap the list link */
retProc = (PGPROC *) proc->links.next;
/* Remove process from wait queue */
SHMQueueDelete(&(proc->links));
(proc->waitLock->waitProcs.size)--;
/* Clean up process' state and pass it the ok/fail signal */
proc->waitLock = NULL;
proc->waitProcLock = NULL;
proc->waitStatus = waitStatus;
/* And awaken it */
SetLatch(&proc->procLatch);
return retProc;
}
/*
* ProcLockWakeup -- routine for waking up processes when a lock is
* released (or a prior waiter is aborted). Scan all waiters
* for lock, waken any that are no longer blocked.
*
* The appropriate lock partition lock must be held by caller.
*/
void
ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
{
PROC_QUEUE *waitQueue = &(lock->waitProcs);
int queue_size = waitQueue->size;
PGPROC *proc;
LOCKMASK aheadRequests = 0;
Assert(queue_size >= 0);
if (queue_size == 0)
return;
proc = (PGPROC *) waitQueue->links.next;
while (queue_size-- > 0)
{
LOCKMODE lockmode = proc->waitLockMode;
/*
* Waken if (a) doesn't conflict with requests of earlier waiters, and
* (b) doesn't conflict with already-held locks.
*/
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
LockCheckConflicts(lockMethodTable,
lockmode,
lock,
proc->waitProcLock) == STATUS_OK)
{
/* OK to waken */
GrantLock(lock, proc->waitProcLock, lockmode);
proc = ProcWakeup(proc, STATUS_OK);
/*
* ProcWakeup removes proc from the lock's waiting process queue
* and returns the next proc in chain; don't use proc's next-link,
* because it's been cleared.
*/
}
else
{
/*
* Cannot wake this guy. Remember his request for later checks.
*/
aheadRequests |= LOCKBIT_ON(lockmode);
proc = (PGPROC *) proc->links.next;
}
}
Assert(waitQueue->size >= 0);
}
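/*
* Illustrative only: callers obtain the lock's method table and pass it
* in, as in this sketch based on the usage in lock.c and deadlock.c:
*
*		ProcLockWakeup(GetLocksMethodTable(lock), lock);
*/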
/*
* CheckDeadLock
*
* We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
* lock to be released by some other process. Check if there's a deadlock; if
* not, just return. (But signal ProcSleep to log a message, if
* log_lock_waits is true.) If we have a real deadlock, remove ourselves from
* the lock's wait queue and signal an error to ProcSleep.
*/
static void
CheckDeadLock(void)
{
	int			i;

	/*
	 * Acquire exclusive lock on the entire shared lock data structures. Must
	 * grab LWLocks in partition-number order to avoid LWLock deadlock.
	 *
	 * Note that the deadlock check interrupt had better not be enabled
	 * anywhere that this process itself holds lock partition locks, else
	 * this will wait forever.  Also note that LWLockAcquire creates a
	 * critical section, so that this routine cannot be interrupted by
	 * cancel/die interrupts.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);

	/*
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have, we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to us
	 * so we know that we don't have to wait anymore.
	 *
	 * We check by looking to see if we've been unlinked from the wait queue.
	 * This is quicker than checking our semaphore's state, since no kernel
	 * call is needed, and it is safe because we hold the lock partition lock.
	 */
	if (MyProc->links.prev == NULL ||
		MyProc->links.next == NULL)
		goto check_done;

#ifdef LOCK_DEBUG
	if (Debug_deadlocks)
		DumpAllLocks();
#endif

	/* Run the deadlock check, and set deadlock_state for use by ProcSleep */
	deadlock_state = DeadLockCheck(MyProc);

	if (deadlock_state == DS_HARD_DEADLOCK)
	{
		/*
		 * Oops.  We have a deadlock.
		 *
		 * Get this process out of wait state. (Note: we could do this more
		 * efficiently by relying on lockAwaited, but use this coding to
		 * preserve the flexibility to kill some other transaction than the
		 * one detecting the deadlock.)
		 *
		 * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
		 * ProcSleep will report an error after we return.
		 */
		Assert(MyProc->waitLock != NULL);
		RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));

		/*
		 * We're done here.  Transaction abort caused by the error that
		 * ProcSleep will raise will cause any other locks we hold to be
		 * released, thus allowing other processes to wake up; we don't need
		 * to do that here.  NOTE: an exception is that releasing locks we
		 * hold doesn't consider the possibility of waiters that were blocked
		 * behind us on the lock we just failed to get, and might now be
		 * wakable because we're not in front of them anymore.  However,
		 * RemoveFromWaitQueue took care of waking up any such processes.
		 */
	}

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to
	 * lock them in increasing order; we don't want to release the other
	 * process until it can get all the locks it needs.  (2) This avoids
	 * O(N^2) behavior inside LWLockRelease.
	 */
check_done:
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));
}

/*
 * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
 *
 * NB: Runs inside a signal handler, be careful.
 */
void
CheckDeadLockAlert(void)
{
	int			save_errno = errno;

	got_deadlock_timeout = true;

	/*
	 * Have to set the latch again, even if handle_sig_alarm already did.
	 * Back then got_deadlock_timeout wasn't yet set... It's unlikely that
	 * this ever would be a problem, but setting a set latch again is cheap.
	 */
	SetLatch(MyLatch);
	errno = save_errno;
}
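
/*
 * For orientation, a rough sketch of how this handler fits into the rest of
 * the system (approximate; the actual call sites live elsewhere in the
 * tree): the timeout is registered during backend startup, and ProcSleep()
 * consumes the flag from its wait loop, roughly
 *
 *		RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
 *		...
 *		if (got_deadlock_timeout)
 *		{
 *			CheckDeadLock();
 *			got_deadlock_timeout = false;
 *		}
 */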

/*
 * ProcWaitForSignal - wait for a signal from another backend.
 *
 * As this uses the generic process latch, the caller has to be robust
 * against unrelated wakeups: always check that the desired state has
 * occurred, and wait again if not.
 */
void
ProcWaitForSignal(void)
{
	WaitLatch(MyLatch, WL_LATCH_SET, 0);
	ResetLatch(MyLatch);
	CHECK_FOR_INTERRUPTS();
}
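
/*
 * Because wakeups can be spurious, a typical caller loops until its own
 * condition holds; a minimal sketch (condition_satisfied() is a placeholder
 * for whatever shared-state test the caller actually needs):
 *
 *		while (!condition_satisfied())
 *			ProcWaitForSignal();
 */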

/*
 * ProcSendSignal - send a signal to a backend identified by PID
 */
void
ProcSendSignal(int pid)
{
	PGPROC	   *proc = NULL;

	if (RecoveryInProgress())
	{
		SpinLockAcquire(ProcStructLock);

		/*
		 * Check to see whether it is the Startup process we wish to signal.
		 * This call is made by the buffer manager when it wishes to wake up
		 * a process that has been waiting for a pin so that it can obtain a
		 * cleanup lock using LockBufferForCleanup(). Startup is not a normal
		 * backend, so BackendPidGetProc() will not return any pid at all.
		 * So we remember the information for this special case.
		 */
		if (pid == ProcGlobal->startupProcPid)
			proc = ProcGlobal->startupProc;

		SpinLockRelease(ProcStructLock);
	}

	if (proc == NULL)
		proc = BackendPidGetProc(pid);

	if (proc != NULL)
	{
		SetLatch(&proc->procLatch);
	}
}
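
/*
 * A rough usage sketch (field names approximate): the buffer manager wakes
 * a pin-count waiter along the lines of
 *
 *		ProcSendSignal(buf->wait_backend_pid);
 *
 * where the PID was recorded earlier by a backend blocking in
 * LockBufferForCleanup().
 */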

/*
 * BecomeLockGroupLeader - designate process as lock group leader
 *
 * Once this function has returned, other processes can join the lock group
 * by calling BecomeLockGroupMember.
 */
void
BecomeLockGroupLeader(void)
{
	LWLock	   *leader_lwlock;

	/* If we already did it, we don't need to do it again. */
	if (MyProc->lockGroupLeader == MyProc)
		return;

	/* We had better not be a follower. */
	Assert(MyProc->lockGroupLeader == NULL);

	/* Create single-member group, containing only ourselves. */
	leader_lwlock = LockHashPartitionLockByProc(MyProc);
	LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
	MyProc->lockGroupLeader = MyProc;
	dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
	LWLockRelease(leader_lwlock);
}
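
/*
 * Sketch of the intended pairing, for orientation (approximate; the real
 * coordination code lives in the parallel infrastructure): a leader calls
 * BecomeLockGroupLeader() before launching workers and advertises its
 * PGPROC pointer and PID in shared memory, e.g.
 *
 *		BecomeLockGroupLeader();
 *		shm->leader_pgproc = MyProc;	(shm is a hypothetical shared struct)
 *		shm->leader_pid = MyProcPid;
 *
 * so that each worker can later pass them to BecomeLockGroupMember().
 */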

/*
 * BecomeLockGroupMember - designate process as lock group member
 *
 * This is pretty straightforward except for the possibility that the leader
 * whose group we're trying to join might exit before we manage to do so;
 * and the PGPROC might get recycled for an unrelated process.  To avoid
 * that, we require the caller to pass the PID of the intended PGPROC as
 * an interlock.  Returns true if we successfully join the intended lock
 * group, and false if not.
 */
bool
BecomeLockGroupMember(PGPROC *leader, int pid)
{
	LWLock	   *leader_lwlock;
	bool		ok = false;

	/* Group leader can't become member of group */
	Assert(MyProc != leader);

	/* Can't already be a member of a group */
	Assert(MyProc->lockGroupLeader == NULL);

	/* PID must be valid. */
	Assert(pid != 0);

	/*
	 * Get lock protecting the group fields.  Note LockHashPartitionLockByProc
	 * accesses leader->pgprocno in a PGPROC that might be free.  This is
	 * safe because all PGPROCs' pgprocno fields are set during shared memory
	 * initialization and never change thereafter; so we will acquire the
	 * correct lock even if the leader PGPROC is in the process of being
	 * recycled.
	 */
	leader_lwlock = LockHashPartitionLockByProc(leader);
	LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);

	/* Is this the leader we're looking for? */
	if (leader->pid == pid && leader->lockGroupLeader == leader)
	{
		/* OK, join the group */
		ok = true;
		MyProc->lockGroupLeader = leader;
		dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
	}
	LWLockRelease(leader_lwlock);

	return ok;
}
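
/*
 * Illustrative worker-side sketch (hedged; names are approximate): a
 * parallel worker joins its leader's group and gives up if the leader has
 * already exited:
 *
 *		if (!BecomeLockGroupMember(leader_pgproc, leader_pid))
 *			return;		(leader is gone; abandon the parallel operation)
 */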