/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/lmgr/proc.c
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
/*
 * Interface (a):
 *		ProcSleep(), ProcWakeup(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
 * the lock wakes the process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with current transaction
 *
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 */
|
2000-10-02 21:42:56 +02:00
|
|
|
#include "postgres.h"
|
|
|
|
|
1996-11-27 08:17:48 +01:00
|
|
|
#include <signal.h>
|
2001-10-01 20:16:35 +02:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <sys/time.h>
|
1996-11-03 06:08:01 +01:00
|
|
|
|
2006-07-13 18:49:20 +02:00
|
|
|
#include "access/transam.h"
|
2011-11-25 14:02:10 +01:00
|
|
|
#include "access/twophase.h"
|
2021-07-31 08:50:26 +02:00
|
|
|
#include "access/xlogutils.h"
|
2006-07-13 18:49:20 +02:00
|
|
|
#include "miscadmin.h"
|
2016-10-04 16:50:13 +02:00
|
|
|
#include "pgstat.h"
|
2007-01-16 14:28:57 +01:00
|
|
|
#include "postmaster/autovacuum.h"
|
2014-02-01 04:45:17 +01:00
|
|
|
#include "replication/slot.h"
|
2011-03-06 23:49:16 +01:00
|
|
|
#include "replication/syncrep.h"
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
requires to have a value of max_wal_senders at least equal to its
primary. So, if a standby created enforces the value of
max_wal_senders to be lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
#include "replication/walsender.h"
|
2016-11-22 20:26:40 +01:00
|
|
|
#include "storage/condition_variable.h"
|
2002-05-05 02:03:29 +02:00
|
|
|
#include "storage/ipc.h"
|
2007-06-19 22:13:22 +02:00
|
|
|
#include "storage/lmgr.h"
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
#include "storage/pmsignal.h"
|
1996-07-09 08:22:35 +02:00
|
|
|
#include "storage/proc.h"
|
2005-05-19 23:35:48 +02:00
|
|
|
#include "storage/procarray.h"
|
2010-02-13 02:32:20 +01:00
|
|
|
#include "storage/procsignal.h"
|
2001-09-29 06:02:27 +02:00
|
|
|
#include "storage/spin.h"
|
2019-11-12 04:00:16 +01:00
|
|
|
#include "storage/standby.h"
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-17 00:43:21 +02:00
|
|
|
#include "utils/timeout.h"
|
2011-09-09 19:23:41 +02:00
|
|
|
#include "utils/timestamp.h"
|
2004-07-17 05:32:14 +02:00
|
|
|
|
2002-10-31 22:34:17 +01:00
|
|
|
/* GUC variables */
int			DeadlockTimeout = 1000; /* wait before running deadlock check */
int			StatementTimeout = 0;	/* 0 disables the timeout */
int			LockTimeout = 0;		/* 0 disables the timeout */
int			IdleInTransactionSessionTimeout = 0;	/* 0 disables the timeout */
int			IdleSessionTimeout = 0; /* 0 disables the timeout */
bool		log_lock_waits = false;

/* Pointer to this process's PGPROC struct, if any */
PGPROC	   *MyProc = NULL;

/*
 * This spinlock protects the freelist of recycled PGPROC structures.
 * We cannot use an LWLock because the LWLock manager depends on already
 * having a PGPROC and a wait semaphore!  But these structures are touched
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
 */
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;

/* Pointers to shared-memory structures */
PROC_HDR   *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
PGPROC	   *PreparedXactProcs = NULL;

/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;

/* Outcome of the most recent deadlock check (DS_NOT_YET_CHECKED until run) */
static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;

/* Is a deadlock check pending? */
static volatile sig_atomic_t got_deadlock_timeout;

/* Local function prototypes; (code, arg) pairs match shmem-exit callbacks */
static void RemoveProcFromArray(int code, Datum arg);
static void ProcKill(int code, Datum arg);
static void AuxiliaryProcKill(int code, Datum arg);
static void CheckDeadLock(void);
|
2001-01-14 06:08:17 +01:00
|
|
|
|
1999-05-07 03:23:11 +02:00
|
|
|
|
2004-09-29 17:15:56 +02:00
|
|
|
/*
|
|
|
|
* Report shared-memory space needed by InitProcGlobal.
|
|
|
|
*/
|
2005-08-21 01:26:37 +02:00
|
|
|
Size
|
2005-06-18 00:32:51 +02:00
|
|
|
ProcGlobalShmemSize(void)
|
2004-09-29 17:15:56 +02:00
|
|
|
{
|
2005-08-21 01:26:37 +02:00
|
|
|
Size size = 0;
|
2020-08-14 21:15:38 +02:00
|
|
|
Size TotalProcs =
|
2022-02-08 21:52:40 +01:00
|
|
|
add_size(GetMaxBackends(), add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
|
2005-08-21 01:26:37 +02:00
|
|
|
|
|
|
|
/* ProcGlobal */
|
|
|
|
size = add_size(size, sizeof(PROC_HDR));
|
2020-08-14 21:15:38 +02:00
|
|
|
size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
|
2005-08-21 01:26:37 +02:00
|
|
|
size = add_size(size, sizeof(slock_t));
|
2004-09-29 17:15:56 +02:00
|
|
|
|
2020-08-14 21:15:38 +02:00
|
|
|
size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
|
2020-08-14 23:30:38 +02:00
|
|
|
size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
|
2020-11-16 23:42:55 +01:00
|
|
|
size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
|
2011-11-25 14:02:10 +01:00
|
|
|
|
2004-09-29 17:15:56 +02:00
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2002-05-05 02:03:29 +02:00
|
|
|
/*
|
|
|
|
* Report number of semaphores needed by InitProcGlobal.
|
|
|
|
*/
|
|
|
|
int
|
2005-06-18 00:32:51 +02:00
|
|
|
ProcGlobalSemas(void)
|
2002-05-05 02:03:29 +02:00
|
|
|
{
|
2007-04-16 20:30:04 +02:00
|
|
|
/*
|
|
|
|
* We need a sema per backend (including autovacuum), plus one for each
|
|
|
|
* auxiliary process.
|
|
|
|
*/
|
2022-02-08 21:52:40 +01:00
|
|
|
return GetMaxBackends() + NUM_AUXILIARY_PROCS;
|
2002-05-05 02:03:29 +02:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
|
|
|
* InitProcGlobal -
|
2006-01-04 22:06:32 +01:00
|
|
|
* Initialize the global process table during postmaster or standalone
|
|
|
|
* backend startup.
|
1999-02-19 07:06:39 +01:00
|
|
|
*
|
2002-05-05 02:03:29 +02:00
|
|
|
* We also create all the per-process semaphores we will need to support
|
1999-02-19 07:06:39 +01:00
|
|
|
* the requested number of backends. We used to allocate semaphores
|
|
|
|
* only when backends were actually started up, but that is bad because
|
|
|
|
* it lets Postgres fail under load --- a lot of Unix systems are
|
|
|
|
* (mis)configured with small limits on the number of semaphores, and
|
|
|
|
* running out when trying to start another backend is a common failure.
|
|
|
|
* So, now we grab enough semaphores to support the desired max number
|
|
|
|
* of backends immediately at initialization --- if the sysadmin has set
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
requires to have a value of max_wal_senders at least equal to its
primary. So, if a standby created enforces the value of
max_wal_senders to be lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
* MaxConnections, max_worker_processes, max_wal_senders, or
|
|
|
|
* autovacuum_max_workers higher than his kernel will support, he'll
|
|
|
|
* find out sooner rather than later.
|
2002-05-05 02:03:29 +02:00
|
|
|
*
|
|
|
|
* Another reason for creating semaphores here is that the semaphore
|
|
|
|
* implementation typically requires us to create semaphores in the
|
|
|
|
* postmaster, not in backends.
|
2006-01-04 22:06:32 +01:00
|
|
|
*
|
|
|
|
* Note: this is NOT called by individual backends under a postmaster,
|
2007-03-07 14:35:03 +01:00
|
|
|
* not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
|
2006-01-04 22:06:32 +01:00
|
|
|
* pointers must be propagated specially for EXEC_BACKEND operation.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
void
|
2005-06-18 00:32:51 +02:00
|
|
|
InitProcGlobal(void)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2006-01-04 22:06:32 +01:00
|
|
|
PGPROC *procs;
|
2011-11-02 03:44:54 +01:00
|
|
|
int i,
|
|
|
|
j;
|
2006-01-04 22:06:32 +01:00
|
|
|
bool found;
|
2022-02-08 21:52:40 +01:00
|
|
|
int max_backends = GetMaxBackends();
|
|
|
|
uint32 TotalProcs = max_backends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2006-01-04 22:06:32 +01:00
|
|
|
/* Create the ProcGlobal shared structure */
|
1996-07-09 08:22:35 +02:00
|
|
|
ProcGlobal = (PROC_HDR *)
|
2006-01-04 22:06:32 +01:00
|
|
|
ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
|
|
|
|
Assert(!found);
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2006-01-04 22:06:32 +01:00
|
|
|
/*
|
|
|
|
* Initialize the data structures.
|
|
|
|
*/
|
2011-06-12 06:07:04 +02:00
|
|
|
ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
|
2008-11-02 22:24:52 +01:00
|
|
|
ProcGlobal->freeProcs = NULL;
|
|
|
|
ProcGlobal->autovacFreeProcs = NULL;
|
Background worker processes
Background workers are postmaster subprocesses that run arbitrary
user-specified code. They can request shared memory access as well as
backend database connections; or they can just use plain libpq frontend
database connections.
Modules listed in shared_preload_libraries can register background
workers in their _PG_init() function; this is early enough that it's not
necessary to provide an extra GUC option, because the necessary extra
resources can be allocated early on. Modules can install more than one
bgworker, if necessary.
Care is taken that these extra processes do not interfere with other
postmaster tasks: only one such process is started on each ServerLoop
iteration. This means a large number of them could be waiting to be
started up and postmaster is still able to quickly service external
connection requests. Also, shutdown sequence should not be impacted by
a worker process that's reasonably well behaved (i.e. promptly responds
to termination signals.)
The current implementation lets worker processes specify their start
time, i.e. at what point in the server startup process they are to be
started: right after postmaster start (in which case they mustn't ask
for shared memory access), when consistent state has been reached
(useful during recovery in a HOT standby server), or when recovery has
terminated (i.e. when normal backends are allowed).
In case of a bgworker crash, actions to take depend on registration
data: if shared memory was requested, then all other connections are
taken down (as well as other bgworkers), just like it were a regular
backend crashing. The bgworker itself is restarted, too, within a
configurable timeframe (which can be configured to be never).
More features to add to this framework can be imagined without much
effort, and have been discussed, but this seems good enough as a useful
unit already.
An elementary sample module is supplied.
Author: Álvaro Herrera
This patch is loosely based on prior patches submitted by KaiGai Kohei,
and unsubmitted code by Simon Riggs.
Reviewed by: KaiGai Kohei, Markus Wanner, Andres Freund,
Heikki Linnakangas, Simon Riggs, Amit Kapila
2012-12-06 18:57:52 +01:00
|
|
|
ProcGlobal->bgworkerFreeProcs = NULL;
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
requires to have a value of max_wal_senders at least equal to its
primary. So, if a standby created enforces the value of
max_wal_senders to be lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
ProcGlobal->walsenderFreeProcs = NULL;
|
2011-08-02 19:23:52 +02:00
|
|
|
ProcGlobal->startupBufferPinWaitBufId = -1;
|
Reduce idle power consumption of walwriter and checkpointer processes.
This patch modifies the walwriter process so that, when it has not found
anything useful to do for many consecutive wakeup cycles, it extends its
sleep time to reduce the server's idle power consumption. It reverts to
normal as soon as it's done any successful flushes. It's still true that
during any async commit, backends check for completed, unflushed pages of
WAL and signal the walwriter if there are any; so that in practice the
walwriter can get awakened and returned to normal operation sooner than the
sleep time might suggest.
Also, improve the checkpointer so that it uses a latch and a computed delay
time to not wake up at all except when it has something to do, replacing a
previous hardcoded 0.5 sec wakeup cycle. This also is primarily useful for
reducing the server's power consumption when idle.
In passing, get rid of the dedicated latch for signaling the walwriter in
favor of using its procLatch, since that comports better with possible
generic signal handlers using that latch. Also, fix a pre-existing bug
with failure to save/restore errno in walwriter's signal handlers.
Peter Geoghegan, somewhat simplified by Tom
2012-05-09 02:03:26 +02:00
|
|
|
ProcGlobal->walwriterLatch = NULL;
|
|
|
|
ProcGlobal->checkpointerLatch = NULL;
|
2016-02-11 14:55:24 +01:00
|
|
|
pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO);
|
2017-09-01 17:45:17 +02:00
|
|
|
pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PGPROCNO);
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2009-08-31 21:41:00 +02:00
|
|
|
/*
|
2012-05-14 09:22:44 +02:00
|
|
|
* Create and initialize all the PGPROC structures we'll need. There are
|
Background worker processes
Background workers are postmaster subprocesses that run arbitrary
user-specified code. They can request shared memory access as well as
backend database connections; or they can just use plain libpq frontend
database connections.
Modules listed in shared_preload_libraries can register background
workers in their _PG_init() function; this is early enough that it's not
necessary to provide an extra GUC option, because the necessary extra
resources can be allocated early on. Modules can install more than one
bgworker, if necessary.
Care is taken that these extra processes do not interfere with other
postmaster tasks: only one such process is started on each ServerLoop
iteration. This means a large number of them could be waiting to be
started up and postmaster is still able to quickly service external
connection requests. Also, shutdown sequence should not be impacted by
a worker process that's reasonably well behaved (i.e. promptly responds
to termination signals.)
The current implementation lets worker processes specify their start
time, i.e. at what point in the server startup process they are to be
started: right after postmaster start (in which case they mustn't ask
for shared memory access), when consistent state has been reached
(useful during recovery in a HOT standby server), or when recovery has
terminated (i.e. when normal backends are allowed).
In case of a bgworker crash, actions to take depend on registration
data: if shared memory was requested, then all other connections are
taken down (as well as other bgworkers), just like it were a regular
backend crashing. The bgworker itself is restarted, too, within a
configurable timeframe (which can be configured to be never).
More features to add to this framework can be imagined without much
effort, and have been discussed, but this seems good enough as a useful
unit already.
An elementary sample module is supplied.
Author: Álvaro Herrera
This patch is loosely based on prior patches submitted by KaiGai Kohei,
and unsubmitted code by Simon Riggs.
Reviewed by: KaiGai Kohei, Markus Wanner, Andres Freund,
Heikki Linnakangas, Simon Riggs, Amit Kapila
2012-12-06 18:57:52 +01:00
|
|
|
* five separate consumers: (1) normal backends, (2) autovacuum workers
|
|
|
|
* and the autovacuum launcher, (3) background workers, (4) auxiliary
|
|
|
|
* processes, and (5) prepared transactions. Each PGPROC structure is
|
|
|
|
* dedicated to exactly one of these purposes, and they do not move
|
|
|
|
* between groups.
|
2009-08-31 21:41:00 +02:00
|
|
|
*/
|
2011-06-12 06:07:04 +02:00
|
|
|
procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
|
Change API of ShmemAlloc() so it throws error rather than returning NULL.
A majority of callers seem to have believed that this was the API spec
already, because they omitted any check for a NULL result, and hence
would crash on an out-of-shared-memory failure. The original proposal
was to just add such error checks everywhere, but that does nothing to
prevent similar omissions in future. Instead, let's make ShmemAlloc()
throw the error (so we can remove the caller-side checks that do exist),
and introduce a new function ShmemAllocNoError() that has the previous
behavior of returning NULL, for the small number of callers that need
that and are prepared to do the right thing. This also lets us remove
the rather wishy-washy behavior of printing a WARNING for out-of-shmem,
which never made much sense: either the caller has a strategy for
dealing with that, or it doesn't. It's not ShmemAlloc's business to
decide whether a warning is appropriate.
The v10 release notes will need to call this out as a significant
source-code change. It's likely that it will be a bug fix for
extension callers too, but if not, they'll need to change to using
ShmemAllocNoError().
This is nominally a bug fix, but the odds that it's fixing any live
bug are actually rather small, because in general the requests
being made by the unchecked callers were already accounted for in
determining the overall shmem size, so really they ought not fail.
Between that and the possible impact on extensions, no back-patch.
Discussion: <24843.1472563085@sss.pgh.pa.us>
2016-09-01 16:13:55 +02:00
|
|
|
MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
|
2011-05-29 01:52:00 +02:00
|
|
|
ProcGlobal->allProcs = procs;
|
2014-01-27 17:07:44 +01:00
|
|
|
/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
|
2022-02-08 21:52:40 +01:00
|
|
|
ProcGlobal->allProcCount = max_backends + NUM_AUXILIARY_PROCS;
|
2011-11-25 14:02:10 +01:00
|
|
|
|
2020-08-14 21:15:38 +02:00
|
|
|
/*
|
|
|
|
* Allocate arrays mirroring PGPROC fields in a dense manner. See
|
|
|
|
* PROC_HDR.
|
|
|
|
*
|
|
|
|
* XXX: It might make sense to increase padding for these arrays, given
|
|
|
|
* how hotly they are accessed.
|
|
|
|
*/
|
|
|
|
ProcGlobal->xids =
|
|
|
|
(TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
|
|
|
|
MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
|
2020-08-14 23:30:38 +02:00
|
|
|
ProcGlobal->subxidStates = (XidCacheStatus *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->subxidStates));
|
|
|
|
MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
|
2020-11-16 23:42:55 +01:00
|
|
|
ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
|
|
|
|
MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
|
2020-08-14 21:15:38 +02:00
|
|
|
|
2011-06-12 06:07:04 +02:00
|
|
|
for (i = 0; i < TotalProcs; i++)
|
2007-04-16 20:30:04 +02:00
|
|
|
{
|
2011-06-12 06:07:04 +02:00
|
|
|
/* Common initialization for all PGPROCs, regardless of type. */
|
2011-08-10 18:20:30 +02:00
|
|
|
|
2011-11-25 14:02:10 +01:00
|
|
|
/*
|
2020-05-16 00:11:03 +02:00
|
|
|
* Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
|
2011-11-25 14:02:10 +01:00
|
|
|
* dummy PGPROCs don't need these though - they're never associated
|
|
|
|
* with a real process
|
|
|
|
*/
|
2022-02-08 21:52:40 +01:00
|
|
|
if (i < max_backends + NUM_AUXILIARY_PROCS)
|
2011-11-25 14:02:10 +01:00
|
|
|
{
|
Make the different Unix-y semaphore implementations ABI-compatible.
Previously, the "sem" field of PGPROC varied in size depending on which
kernel semaphore API we were using. That was okay as long as there was
only one likely choice per platform, but in the wake of commit ecb0d20a9,
that assumption seems rather shaky. It doesn't seem out of the question
anymore that an extension compiled against one API choice might be loaded
into a postmaster built with another choice. Moreover, this prevents any
possibility of selecting the semaphore API at postmaster startup, which
might be something we want to do in future.
Hence, change PGPROC.sem to be PGSemaphore (i.e. a pointer) for all Unix
semaphore APIs, and turn the pointed-to data into an opaque struct whose
contents are only known within the responsible modules.
For the SysV and unnamed-POSIX APIs, the pointed-to data has to be
allocated elsewhere in shared memory, which takes a little bit of
rejiggering of the InitShmemAllocation code sequence. (I invented a
ShmemAllocUnlocked() function to make that a little cleaner than it used
to be. That function is not meant for any uses other than the ones it
has now, but it beats having InitShmemAllocation() know explicitly about
allocation of space for semaphores and spinlocks.) This change means an
extra indirection to access the semaphore data, but since we only touch
that when blocking or awakening a process, there shouldn't be any
meaningful performance penalty. Moreover, at least for the unnamed-POSIX
case on Linux, the sem_t type is quite a bit wider than a pointer, so this
reduces sizeof(PGPROC) which seems like a good thing.
For the named-POSIX API, there's effectively no change: the PGPROC.sem
field was and still is a pointer to something returned by sem_open() in
the postmaster's memory space. Document and check the pre-existing
limitation that this case can't work in EXEC_BACKEND mode.
It did not seem worth unifying the Windows semaphore ABI with the Unix
cases, since there's no likelihood of needing ABI compatibility much less
runtime switching across those cases. However, we can simplify the Windows
code a bit if we define PGSemaphore as being directly a HANDLE, rather than
pointer to HANDLE, so let's do that while we're here. (This also ends up
being no change in what's physically stored in PGPROC.sem. We're just
moving the HANDLE fetch from callees to callers.)
It would take a bunch of additional code shuffling to get to the point of
actually choosing a semaphore API at postmaster start, but the effects
of that would now be localized in the port/XXX_sema.c files, so it seems
like fit material for a separate patch. The need for it is unproven as
yet, anyhow, whereas the ABI risk to extensions seems real enough.
Discussion: https://postgr.es/m/4029.1481413370@sss.pgh.pa.us
2016-12-12 19:32:10 +01:00
|
|
|
procs[i].sem = PGSemaphoreCreate();
|
2011-11-25 14:02:10 +01:00
|
|
|
InitSharedLatch(&(procs[i].procLatch));
|
2020-05-16 00:11:03 +02:00
|
|
|
LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
|
2011-11-25 14:02:10 +01:00
|
|
|
}
|
|
|
|
procs[i].pgprocno = i;
|
2011-06-12 06:07:04 +02:00
|
|
|
|
|
|
|
/*
|
Background worker processes
Background workers are postmaster subprocesses that run arbitrary
user-specified code. They can request shared memory access as well as
backend database connections; or they can just use plain libpq frontend
database connections.
Modules listed in shared_preload_libraries can register background
workers in their _PG_init() function; this is early enough that it's not
necessary to provide an extra GUC option, because the necessary extra
resources can be allocated early on. Modules can install more than one
bgworker, if necessary.
Care is taken that these extra processes do not interfere with other
postmaster tasks: only one such process is started on each ServerLoop
iteration. This means a large number of them could be waiting to be
started up and postmaster is still able to quickly service external
connection requests. Also, shutdown sequence should not be impacted by
a worker process that's reasonably well behaved (i.e. promptly responds
to termination signals.)
The current implementation lets worker processes specify their start
time, i.e. at what point in the server startup process they are to be
started: right after postmaster start (in which case they mustn't ask
for shared memory access), when consistent state has been reached
(useful during recovery in a HOT standby server), or when recovery has
terminated (i.e. when normal backends are allowed).
In case of a bgworker crash, actions to take depend on registration
data: if shared memory was requested, then all other connections are
taken down (as well as other bgworkers), just as if it were a regular
backend crashing. The bgworker itself is restarted, too, within a
configurable timeframe (which can be configured to be never).
More features to add to this framework can be imagined without much
effort, and have been discussed, but this seems good enough as a useful
unit already.
An elementary sample module is supplied.
Author: Álvaro Herrera
This patch is loosely based on prior patches submitted by KaiGai Kohei,
and unsubmitted code by Simon Riggs.
Reviewed by: KaiGai Kohei, Markus Wanner, Andres Freund,
Heikki Linnakangas, Simon Riggs, Amit Kapila
2012-12-06 18:57:52 +01:00
|
|
|
* Newly created PGPROCs for normal backends, autovacuum and bgworkers
|
|
|
|
* must be queued up on the appropriate free list. Because there can
|
|
|
|
* only ever be a small, fixed number of auxiliary processes, no free
|
|
|
|
* list is used in that case; InitAuxiliaryProcess() instead uses a
|
|
|
|
* linear search. PGPROCs for prepared transactions are added to a
|
|
|
|
* free list by TwoPhaseShmemInit().
|
2011-06-12 06:07:04 +02:00
|
|
|
*/
|
|
|
|
if (i < MaxConnections)
|
|
|
|
{
|
|
|
|
/* PGPROC for normal backend, add to freeProcs list */
|
|
|
|
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
|
|
|
|
ProcGlobal->freeProcs = &procs[i];
|
2015-07-28 20:51:57 +02:00
|
|
|
procs[i].procgloballist = &ProcGlobal->freeProcs;
|
2011-06-12 06:07:04 +02:00
|
|
|
}
|
Background worker processes
Background workers are postmaster subprocesses that run arbitrary
user-specified code. They can request shared memory access as well as
backend database connections; or they can just use plain libpq frontend
database connections.
Modules listed in shared_preload_libraries can register background
workers in their _PG_init() function; this is early enough that it's not
necessary to provide an extra GUC option, because the necessary extra
resources can be allocated early on. Modules can install more than one
bgworker, if necessary.
Care is taken that these extra processes do not interfere with other
postmaster tasks: only one such process is started on each ServerLoop
iteration. This means a large number of them could be waiting to be
started up and postmaster is still able to quickly service external
connection requests. Also, shutdown sequence should not be impacted by
a worker process that's reasonably well behaved (i.e. promptly responds
to termination signals.)
The current implementation lets worker processes specify their start
time, i.e. at what point in the server startup process they are to be
started: right after postmaster start (in which case they mustn't ask
for shared memory access), when consistent state has been reached
(useful during recovery in a HOT standby server), or when recovery has
terminated (i.e. when normal backends are allowed).
In case of a bgworker crash, actions to take depend on registration
data: if shared memory was requested, then all other connections are
taken down (as well as other bgworkers), just as if it were a regular
backend crashing. The bgworker itself is restarted, too, within a
configurable timeframe (which can be configured to be never).
More features to add to this framework can be imagined without much
effort, and have been discussed, but this seems good enough as a useful
unit already.
An elementary sample module is supplied.
Author: Álvaro Herrera
This patch is loosely based on prior patches submitted by KaiGai Kohei,
and unsubmitted code by Simon Riggs.
Reviewed by: KaiGai Kohei, Markus Wanner, Andres Freund,
Heikki Linnakangas, Simon Riggs, Amit Kapila
2012-12-06 18:57:52 +01:00
|
|
|
else if (i < MaxConnections + autovacuum_max_workers + 1)
|
2011-06-12 06:07:04 +02:00
|
|
|
{
|
|
|
|
/* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
|
|
|
|
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
|
|
|
|
ProcGlobal->autovacFreeProcs = &procs[i];
|
2015-07-28 20:51:57 +02:00
|
|
|
procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
|
2011-06-12 06:07:04 +02:00
|
|
|
}
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
needs to have a value of max_wal_senders at least equal to its
primary's.  So, if a standby is created with max_wal_senders set
lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
|
Background worker processes
Background workers are postmaster subprocesses that run arbitrary
user-specified code. They can request shared memory access as well as
backend database connections; or they can just use plain libpq frontend
database connections.
Modules listed in shared_preload_libraries can register background
workers in their _PG_init() function; this is early enough that it's not
necessary to provide an extra GUC option, because the necessary extra
resources can be allocated early on. Modules can install more than one
bgworker, if necessary.
Care is taken that these extra processes do not interfere with other
postmaster tasks: only one such process is started on each ServerLoop
iteration. This means a large number of them could be waiting to be
started up and postmaster is still able to quickly service external
connection requests. Also, shutdown sequence should not be impacted by
a worker process that's reasonably well behaved (i.e. promptly responds
to termination signals.)
The current implementation lets worker processes specify their start
time, i.e. at what point in the server startup process they are to be
started: right after postmaster start (in which case they mustn't ask
for shared memory access), when consistent state has been reached
(useful during recovery in a HOT standby server), or when recovery has
terminated (i.e. when normal backends are allowed).
In case of a bgworker crash, actions to take depend on registration
data: if shared memory was requested, then all other connections are
taken down (as well as other bgworkers), just as if it were a regular
backend crashing. The bgworker itself is restarted, too, within a
configurable timeframe (which can be configured to be never).
More features to add to this framework can be imagined without much
effort, and have been discussed, but this seems good enough as a useful
unit already.
An elementary sample module is supplied.
Author: Álvaro Herrera
This patch is loosely based on prior patches submitted by KaiGai Kohei,
and unsubmitted code by Simon Riggs.
Reviewed by: KaiGai Kohei, Markus Wanner, Andres Freund,
Heikki Linnakangas, Simon Riggs, Amit Kapila
2012-12-06 18:57:52 +01:00
|
|
|
{
|
|
|
|
/* PGPROC for bgworker, add to bgworkerFreeProcs list */
|
|
|
|
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
|
|
|
|
ProcGlobal->bgworkerFreeProcs = &procs[i];
|
2015-07-28 20:51:57 +02:00
|
|
|
procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
|
Background worker processes
Background workers are postmaster subprocesses that run arbitrary
user-specified code. They can request shared memory access as well as
backend database connections; or they can just use plain libpq frontend
database connections.
Modules listed in shared_preload_libraries can register background
workers in their _PG_init() function; this is early enough that it's not
necessary to provide an extra GUC option, because the necessary extra
resources can be allocated early on. Modules can install more than one
bgworker, if necessary.
Care is taken that these extra processes do not interfere with other
postmaster tasks: only one such process is started on each ServerLoop
iteration. This means a large number of them could be waiting to be
started up and postmaster is still able to quickly service external
connection requests. Also, shutdown sequence should not be impacted by
a worker process that's reasonably well behaved (i.e. promptly responds
to termination signals.)
The current implementation lets worker processes specify their start
time, i.e. at what point in the server startup process they are to be
started: right after postmaster start (in which case they mustn't ask
for shared memory access), when consistent state has been reached
(useful during recovery in a HOT standby server), or when recovery has
terminated (i.e. when normal backends are allowed).
In case of a bgworker crash, actions to take depend on registration
data: if shared memory was requested, then all other connections are
taken down (as well as other bgworkers), just as if it were a regular
backend crashing. The bgworker itself is restarted, too, within a
configurable timeframe (which can be configured to be never).
More features to add to this framework can be imagined without much
effort, and have been discussed, but this seems good enough as a useful
unit already.
An elementary sample module is supplied.
Author: Álvaro Herrera
This patch is loosely based on prior patches submitted by KaiGai Kohei,
and unsubmitted code by Simon Riggs.
Reviewed by: KaiGai Kohei, Markus Wanner, Andres Freund,
Heikki Linnakangas, Simon Riggs, Amit Kapila
2012-12-06 18:57:52 +01:00
|
|
|
}
|
2022-02-08 21:52:40 +01:00
|
|
|
else if (i < max_backends)
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
needs to have a value of max_wal_senders at least equal to its
primary's.  So, if a standby is created with max_wal_senders set
lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
{
|
|
|
|
/* PGPROC for walsender, add to walsenderFreeProcs list */
|
|
|
|
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;
|
|
|
|
ProcGlobal->walsenderFreeProcs = &procs[i];
|
|
|
|
procs[i].procgloballist = &ProcGlobal->walsenderFreeProcs;
|
|
|
|
}
|
2011-11-02 03:44:54 +01:00
|
|
|
|
|
|
|
/* Initialize myProcLocks[] shared memory queues. */
|
|
|
|
for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
|
|
|
|
SHMQueueInit(&(procs[i].myProcLocks[j]));
|
2016-02-07 16:16:13 +01:00
|
|
|
|
|
|
|
/* Initialize lockGroupMembers list. */
|
|
|
|
dlist_init(&procs[i].lockGroupMembers);
|
2018-11-13 06:22:40 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the atomic variables, otherwise, it won't be safe to
|
|
|
|
* access them for backends that aren't currently in use.
|
|
|
|
*/
|
|
|
|
pg_atomic_init_u32(&(procs[i].procArrayGroupNext), INVALID_PGPROCNO);
|
|
|
|
pg_atomic_init_u32(&(procs[i].clogGroupNext), INVALID_PGPROCNO);
|
2021-02-22 10:25:00 +01:00
|
|
|
pg_atomic_init_u64(&(procs[i].waitStart), 0);
|
2007-04-16 20:30:04 +02:00
|
|
|
}
|
|
|
|
|
2009-08-31 21:41:00 +02:00
|
|
|
/*
|
2011-11-25 14:02:10 +01:00
|
|
|
* Save pointers to the blocks of PGPROC structures reserved for auxiliary
|
|
|
|
* processes and prepared transactions.
|
2009-08-31 21:41:00 +02:00
|
|
|
*/
|
2022-02-08 21:52:40 +01:00
|
|
|
AuxiliaryProcs = &procs[max_backends];
|
|
|
|
PreparedXactProcs = &procs[max_backends + NUM_AUXILIARY_PROCS];
|
2006-01-04 22:06:32 +01:00
|
|
|
|
|
|
|
/* Create ProcStructLock spinlock, too */
|
|
|
|
ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
|
|
|
|
SpinLockInit(ProcStructLock);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2001-09-29 06:02:27 +02:00
|
|
|
/*
|
2002-05-05 02:03:29 +02:00
|
|
|
* InitProcess -- initialize a per-process data structure for this backend
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
void
|
2000-11-29 00:27:57 +01:00
|
|
|
InitProcess(void)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2015-07-28 20:51:57 +02:00
|
|
|
PGPROC *volatile *procgloballist;
|
2001-09-07 02:27:30 +02:00
|
|
|
|
|
|
|
/*
|
2006-01-04 22:06:32 +01:00
|
|
|
* ProcGlobal should be set up already (if we are a backend, we inherit
|
|
|
|
* this by fork() or EXEC_BACKEND mechanism from the postmaster).
|
2001-09-07 02:27:30 +02:00
|
|
|
*/
|
2015-10-16 20:20:36 +02:00
|
|
|
if (ProcGlobal == NULL)
|
2003-07-25 00:04:15 +02:00
|
|
|
elog(PANIC, "proc header uninitialized");
|
2001-09-07 02:27:30 +02:00
|
|
|
|
|
|
|
if (MyProc != NULL)
|
2003-07-25 00:04:15 +02:00
|
|
|
elog(ERROR, "you already exist");
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2015-07-28 20:51:57 +02:00
|
|
|
/* Decide which list should supply our PGPROC. */
|
|
|
|
if (IsAnyAutoVacuumProcess())
|
2015-10-16 20:20:36 +02:00
|
|
|
procgloballist = &ProcGlobal->autovacFreeProcs;
|
2015-07-28 20:51:57 +02:00
|
|
|
else if (IsBackgroundWorker)
|
2015-10-16 20:20:36 +02:00
|
|
|
procgloballist = &ProcGlobal->bgworkerFreeProcs;
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
needs to have a value of max_wal_senders at least equal to its
primary's.  So, if a standby is created with max_wal_senders set
lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
else if (am_walsender)
|
|
|
|
procgloballist = &ProcGlobal->walsenderFreeProcs;
|
2015-07-28 20:51:57 +02:00
|
|
|
else
|
2015-10-16 20:20:36 +02:00
|
|
|
procgloballist = &ProcGlobal->freeProcs;
|
2015-07-28 20:51:57 +02:00
|
|
|
|
2001-09-04 23:42:17 +02:00
|
|
|
/*
|
2015-07-28 20:51:57 +02:00
|
|
|
* Try to get a proc struct from the appropriate free list. If this
|
|
|
|
* fails, we must be out of PGPROC structures (not to mention semaphores).
|
2005-10-11 22:41:32 +02:00
|
|
|
*
|
|
|
|
* While we are holding the ProcStructLock, also copy the current shared
|
|
|
|
* estimate of spins_per_delay to local storage.
|
2001-09-04 23:42:17 +02:00
|
|
|
*/
|
2001-09-29 06:02:27 +02:00
|
|
|
SpinLockAcquire(ProcStructLock);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2015-10-16 20:20:36 +02:00
|
|
|
set_spins_per_delay(ProcGlobal->spins_per_delay);
|
2005-10-11 22:41:32 +02:00
|
|
|
|
2015-07-28 20:51:57 +02:00
|
|
|
MyProc = *procgloballist;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2008-11-02 22:24:52 +01:00
|
|
|
if (MyProc != NULL)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2015-07-28 20:51:57 +02:00
|
|
|
*procgloballist = (PGPROC *) MyProc->links.next;
|
2001-09-29 06:02:27 +02:00
|
|
|
SpinLockRelease(ProcStructLock);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2002-06-11 15:40:53 +02:00
|
|
|
* If we reach here, all the PGPROCs are in use. This is one of the
|
2002-05-05 02:03:29 +02:00
|
|
|
* possible places to detect "too many backends", so give the standard
|
2007-04-16 20:30:04 +02:00
|
|
|
* error message. XXX do we need to give a different failure message
|
|
|
|
* in the autovacuum case?
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2001-09-29 06:02:27 +02:00
|
|
|
SpinLockRelease(ProcStructLock);
|
Move max_wal_senders out of max_connections for connection slot handling
Since its introduction, max_wal_senders is counted as part of
max_connections when it comes to define how many connection slots can be
used for replication connections with a WAL sender context. This can
lead to confusion for some users, as it could be possible to block a
base backup or replication from happening because other backend sessions
are already taken for other purposes by an application, and
superuser-only connection slots are not a correct solution to handle
that case.
This commit makes max_wal_senders independent of max_connections for its
handling of PGPROC entries in ProcGlobal, meaning that connection slots
for WAL senders are handled using their own free queue, like autovacuum
workers and bgworkers.
One compatibility issue that this change creates is that a standby now
needs to have a value of max_wal_senders at least equal to its
primary's.  So, if a standby is created with max_wal_senders set
lower than that, then this could break failovers.
Normally this should not be an issue though, as any settings of a
standby are inherited from its primary as postgresql.conf gets normally
copied as part of a base backup, so parameters would be consistent.
Author: Alexander Kukushkin
Reviewed-by: Kyotaro Horiguchi, Petr Jelínek, Masahiko Sawada, Oleksii
Kliukin
Discussion: https://postgr.es/m/CAFh8B=nBzHQeYAu0b8fjK-AF1X4+_p6GRtwG+cCgs6Vci2uRuQ@mail.gmail.com
2019-02-12 02:07:56 +01:00
|
|
|
if (am_walsender)
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
|
|
|
|
errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
|
|
|
|
max_wal_senders)));
|
2003-07-25 00:04:15 +02:00
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
|
|
|
|
errmsg("sorry, too many clients already")));
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2015-07-28 20:51:57 +02:00
|
|
|
/*
|
|
|
|
* Cross-check that the PGPROC is of the type we expect; if this were not
|
|
|
|
* the case, it would get returned to the wrong list.
|
|
|
|
*/
|
|
|
|
Assert(MyProc->procgloballist == procgloballist);
|
|
|
|
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
/*
|
|
|
|
* Now that we have a PGPROC, mark ourselves as an active postmaster
|
|
|
|
* child; this is so that the postmaster can detect it if we exit without
|
2009-08-31 21:41:00 +02:00
|
|
|
* cleaning up. (XXX autovac launcher currently doesn't participate in
|
|
|
|
* this; it probably should.)
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
*/
|
2009-08-31 21:41:00 +02:00
|
|
|
if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
|
2010-08-23 19:20:01 +02:00
|
|
|
MarkPostmasterChildActive();
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2011-11-02 03:44:54 +01:00
|
|
|
* Initialize all fields of MyProc, except for those previously
|
|
|
|
* initialized by InitProcGlobal.
|
2001-01-14 06:08:17 +01:00
|
|
|
*/
|
2001-01-22 23:30:06 +01:00
|
|
|
SHMQueueElemInit(&(MyProc->links));
|
2020-06-17 09:14:37 +02:00
|
|
|
MyProc->waitStatus = PROC_WAIT_STATUS_OK;
|
2007-09-05 20:10:48 +02:00
|
|
|
MyProc->lxid = InvalidLocalTransactionId;
|
2012-11-29 23:15:52 +01:00
|
|
|
MyProc->fpVXIDLock = false;
|
|
|
|
MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
|
2020-08-14 21:15:38 +02:00
|
|
|
MyProc->xid = InvalidTransactionId;
|
2020-08-14 01:25:21 +02:00
|
|
|
MyProc->xmin = InvalidTransactionId;
|
2001-09-29 06:02:27 +02:00
|
|
|
MyProc->pid = MyProcPid;
|
2007-09-05 20:10:48 +02:00
|
|
|
/* backendId, databaseId and roleId will be filled in later */
|
|
|
|
MyProc->backendId = InvalidBackendId;
|
2006-01-04 22:06:32 +01:00
|
|
|
MyProc->databaseId = InvalidOid;
|
2005-07-31 19:19:22 +02:00
|
|
|
MyProc->roleId = InvalidOid;
|
Make autovacuum more aggressive to remove orphaned temp tables
Commit dafa084, added in 10, made the removal of temporary orphaned
tables more aggressive. This commit makes an extra step into the
aggressiveness by adding a flag in each backend's MyProc which tracks
down any temporary namespace currently in use. The flag is set when the
namespace gets created and can be reset if the temporary namespace has
been created in a transaction or sub-transaction which is aborted. The
flag value assignment is assumed to be atomic, so this can be done in a
lock-less fashion like other flags already present in PGPROC like
databaseId or backendId, still the fact that the temporary namespace and
table created are still locked until the transaction creating those
commits acts as a barrier for other backends.
This new flag gets used by autovacuum to discard more aggressively
orphaned tables by additionally checking for the database a backend is
connected to as well as its temporary namespace in-use, removing
orphaned temporary relations even if a backend reuses the same slot as
one which created temporary relations in a past session.
The base idea of this patch comes from Robert Haas, has been written in
its first version by Tsunakawa Takayuki, then heavily reviewed by me.
Author: Tsunakawa Takayuki
Reviewed-by: Michael Paquier, Kyotaro Horiguchi, Andres Freund
Discussion: https://postgr.es/m/0A3221C70F24FB45833433255569204D1F8A4DC6@G01JPEXMBYT05
Backpatch: 11-, as PGPROC gains a new flag and we don't want silent ABI
breakages on already released versions.
2018-08-13 11:49:04 +02:00
|
|
|
MyProc->tempNamespaceId = InvalidOid;
|
2017-02-01 23:52:35 +01:00
|
|
|
MyProc->isBackgroundWorker = IsBackgroundWorker;
|
2022-03-24 19:32:06 +01:00
|
|
|
MyProc->delayChkpt = 0;
|
2020-11-16 23:42:55 +01:00
|
|
|
MyProc->statusFlags = 0;
|
2009-08-31 21:41:00 +02:00
|
|
|
/* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
|
2007-10-24 22:55:36 +02:00
|
|
|
if (IsAutoVacuumWorkerProcess())
|
2020-11-16 23:42:55 +01:00
|
|
|
MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
|
2001-09-29 06:02:27 +02:00
|
|
|
MyProc->lwWaiting = false;
|
Make group commit more effective.
When a backend needs to flush the WAL, and someone else is already flushing
the WAL, wait until it releases the WALInsertLock and check if we still need
to do the flush or if the other backend already did the work for us, before
acquiring WALInsertLock. This helps group commit, because when the WAL flush
finishes, all the backends that were waiting for it can be woken up in one
go, and the can all concurrently observe that they're done, rather than
waking them up one by one in a cascading fashion.
This is based on a new LWLock function, LWLockWaitUntilFree(), which has
peculiar semantics. If the lock is immediately free, it grabs the lock and
returns true. If it's not free, it waits until it is released, but then
returns false without grabbing the lock. This is used in XLogFlush(), so
that when the lock is acquired, the backend flushes the WAL, but if it's
not, the backend first checks the current flush location before retrying.
Original patch and benchmarking by Peter Geoghegan and Simon Riggs, although
this patch as committed ended up being very different from that.
2012-01-30 15:40:58 +01:00
|
|
|
MyProc->lwWaitMode = 0;
|
2001-01-22 23:30:06 +01:00
|
|
|
MyProc->waitLock = NULL;
|
2004-08-27 19:07:42 +02:00
|
|
|
MyProc->waitProcLock = NULL;
|
2021-02-22 10:25:00 +01:00
|
|
|
pg_atomic_write_u64(&MyProc->waitStart, 0);
|
2011-11-02 03:44:54 +01:00
|
|
|
#ifdef USE_ASSERT_CHECKING
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Last process should have released all locks. */
|
|
|
|
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
|
|
|
|
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
|
|
|
|
}
|
|
|
|
#endif
|
2010-01-16 11:05:59 +01:00
|
|
|
MyProc->recoveryConflictPending = false;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-08-10 18:20:30 +02:00
|
|
|
/* Initialize fields for sync rep */
|
2012-06-24 17:51:37 +02:00
|
|
|
MyProc->waitLSN = 0;
|
2011-03-06 23:49:16 +01:00
|
|
|
MyProc->syncRepState = SYNC_REP_NOT_WAITING;
|
|
|
|
SHMQueueElemInit(&(MyProc->syncRepLinks));
|
2011-08-10 18:20:30 +02:00
|
|
|
|
2015-08-06 17:52:51 +02:00
|
|
|
/* Initialize fields for group XID clearing. */
|
2016-02-11 14:55:24 +01:00
|
|
|
MyProc->procArrayGroupMember = false;
|
|
|
|
MyProc->procArrayGroupMemberXid = InvalidTransactionId;
|
2018-11-13 06:22:40 +01:00
|
|
|
Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PGPROCNO);
|
2015-08-06 17:52:51 +02:00
|
|
|
|
2016-02-07 16:16:13 +01:00
|
|
|
/* Check that group locking fields are in a proper initial state. */
|
|
|
|
Assert(MyProc->lockGroupLeader == NULL);
|
|
|
|
Assert(dlist_is_empty(&MyProc->lockGroupMembers));
|
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
/* Initialize wait event information. */
|
|
|
|
MyProc->wait_event_info = 0;
|
|
|
|
|
2017-09-01 17:45:17 +02:00
|
|
|
/* Initialize fields for group transaction status update. */
|
|
|
|
MyProc->clogGroupMember = false;
|
|
|
|
MyProc->clogGroupMemberXid = InvalidTransactionId;
|
|
|
|
MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
|
|
|
|
MyProc->clogGroupMemberPage = -1;
|
|
|
|
MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
|
2018-11-13 06:22:40 +01:00
|
|
|
Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PGPROCNO);
|
2017-09-01 17:45:17 +02:00
|
|
|
|
2011-08-10 18:20:30 +02:00
|
|
|
/*
|
2015-01-14 18:45:22 +01:00
|
|
|
* Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
|
|
|
|
* on it. That allows us to repoint the process latch, which so far
|
|
|
|
* points to process local one, to the shared one.
|
2011-08-10 18:20:30 +02:00
|
|
|
*/
|
|
|
|
OwnLatch(&MyProc->procLatch);
|
2015-01-14 18:45:22 +01:00
|
|
|
SwitchToSharedLatch();
|
2011-03-06 23:49:16 +01:00
|
|
|
|
Improve efficiency of wait event reporting, remove proc.h dependency.
pgstat_report_wait_start() and pgstat_report_wait_end() required two
conditional branches so far. One to check if MyProc is NULL, the other to
check if pgstat_track_activities is set. As wait events are used around
comparatively lightweight operations, and are inlined (reducing branch
predictor effectiveness), that's not great.
The dependency on MyProc has a second disadvantage: Low-level subsystems, like
storage/file/fd.c, report wait events, but architecturally it is preferable
for them to not depend on inter-process subsystems like proc.h (defining
PGPROC). After this change including pgstat.h (nor obviously its
sub-components like backend_status.h, wait_event.h, ...) does not pull in IPC
related headers anymore.
These goals, efficiency and abstraction, are achieved by having
pgstat_report_wait_start/end() not interact with MyProc, but instead a new
my_wait_event_info variable. At backend startup it points to a local variable,
removing the need to check for MyProc being NULL. During process
initialization my_wait_event_info is redirected to MyProc->wait_event_info. At
shutdown this is reversed. Because wait event reporting now does not need to
know about where the wait event is stored, it does not need to know about
PGPROC anymore.
The removal of the branch for checking pgstat_track_activities is simpler:
Don't check anymore. The cost due to the branch are often higher than the
store - and even if not, pgstat_track_activities is rarely disabled.
The main motivator to commit this work now is that removing the (indirect)
pgproc.h include from pgstat.h simplifies a patch to move statistics reporting
to shared memory (which still has a chance to get into 14).
Author: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/20210402194458.2vu324hkk2djq6ce@alap3.anarazel.de
2021-04-03 20:44:47 +02:00
|
|
|
/* now that we have a proc, report wait events to shared memory */
|
|
|
|
pgstat_set_wait_event_storage(&MyProc->wait_event_info);
|
|
|
|
|
2005-05-19 23:35:48 +02:00
|
|
|
/*
|
2006-01-04 22:06:32 +01:00
|
|
|
* We might be reusing a semaphore that belonged to a failed process. So
|
2006-04-14 05:38:56 +02:00
|
|
|
* be careful and reinitialize its value here. (This is not strictly
|
|
|
|
* necessary anymore, but seems like a good idea for cleanliness.)
|
2005-05-19 23:35:48 +02:00
|
|
|
*/
|
Make the different Unix-y semaphore implementations ABI-compatible.
Previously, the "sem" field of PGPROC varied in size depending on which
kernel semaphore API we were using. That was okay as long as there was
only one likely choice per platform, but in the wake of commit ecb0d20a9,
that assumption seems rather shaky. It doesn't seem out of the question
anymore that an extension compiled against one API choice might be loaded
into a postmaster built with another choice. Moreover, this prevents any
possibility of selecting the semaphore API at postmaster startup, which
might be something we want to do in future.
Hence, change PGPROC.sem to be PGSemaphore (i.e. a pointer) for all Unix
semaphore APIs, and turn the pointed-to data into an opaque struct whose
contents are only known within the responsible modules.
For the SysV and unnamed-POSIX APIs, the pointed-to data has to be
allocated elsewhere in shared memory, which takes a little bit of
rejiggering of the InitShmemAllocation code sequence. (I invented a
ShmemAllocUnlocked() function to make that a little cleaner than it used
to be. That function is not meant for any uses other than the ones it
has now, but it beats having InitShmemAllocation() know explicitly about
allocation of space for semaphores and spinlocks.) This change means an
extra indirection to access the semaphore data, but since we only touch
that when blocking or awakening a process, there shouldn't be any
meaningful performance penalty. Moreover, at least for the unnamed-POSIX
case on Linux, the sem_t type is quite a bit wider than a pointer, so this
reduces sizeof(PGPROC) which seems like a good thing.
For the named-POSIX API, there's effectively no change: the PGPROC.sem
field was and still is a pointer to something returned by sem_open() in
the postmaster's memory space. Document and check the pre-existing
limitation that this case can't work in EXEC_BACKEND mode.
It did not seem worth unifying the Windows semaphore ABI with the Unix
cases, since there's no likelihood of needing ABI compatibility much less
runtime switching across those cases. However, we can simplify the Windows
code a bit if we define PGSemaphore as being directly a HANDLE, rather than
pointer to HANDLE, so let's do that while we're here. (This also ends up
being no change in what's physically stored in PGPROC.sem. We're just
moving the HANDLE fetch from callees to callers.)
It would take a bunch of additional code shuffling to get to the point of
actually choosing a semaphore API at postmaster start, but the effects
of that would now be localized in the port/XXX_sema.c files, so it seems
like fit material for a separate patch. The need for it is unproven as
yet, anyhow, whereas the ABI risk to extensions seems real enough.
Discussion: https://postgr.es/m/4029.1481413370@sss.pgh.pa.us
2016-12-12 19:32:10 +01:00
|
|
|
PGSemaphoreReset(MyProc->sem);
|
2005-05-19 23:35:48 +02:00
|
|
|
|
2001-01-25 04:31:16 +01:00
|
|
|
/*
|
2001-09-29 06:02:27 +02:00
|
|
|
* Arrange to clean up at backend exit.
|
2001-01-25 04:31:16 +01:00
|
|
|
*/
|
2001-01-14 06:08:17 +01:00
|
|
|
on_shmem_exit(ProcKill, 0);
|
2001-01-25 04:31:16 +01:00
|
|
|
|
|
|
|
/*
|
2002-06-11 15:40:53 +02:00
|
|
|
* Now that we have a PGPROC, we could try to acquire locks, so initialize
|
2014-06-30 09:13:48 +02:00
|
|
|
* local state needed for LWLocks, and the deadlock checker.
|
2001-01-25 04:31:16 +01:00
|
|
|
*/
|
2014-06-30 09:13:48 +02:00
|
|
|
InitLWLockAccess();
|
2001-01-25 04:31:16 +01:00
|
|
|
InitDeadLockChecking();
|
2001-01-14 06:08:17 +01:00
|
|
|
}
|
|
|
|
|
2006-01-04 22:06:32 +01:00
|
|
|
/*
 * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
 *
 * This is separate from InitProcess because we can't acquire LWLocks until
 * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
 * work until after we've done CreateSharedMemoryAndSemaphores.
 *
 * Must be called after InitProcess: MyProc has to exist before it can be
 * published.  Once this returns, other backends can see this process in
 * the ProcArray.
 */
void
InitProcessPhase2(void)
{
	Assert(MyProc != NULL);

	/*
	 * Add our PGPROC to the PGPROC array in shared memory.
	 */
	ProcArrayAdd(MyProc);

	/*
	 * Arrange to clean that up at backend exit.  Registered after the add
	 * succeeds, so the callback never runs for a PGPROC that was not
	 * actually published.
	 */
	on_shmem_exit(RemoveProcFromArray, 0);
}
|
|
|
|
|
2001-09-29 06:02:27 +02:00
|
|
|
/*
 * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
 *
 * This is called by bgwriter and similar processes so that they will have a
 * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
 * and sema that are assigned are one of the extra ones created during
 * InitProcGlobal.
 *
 * Auxiliary processes are presently not expected to wait for real (lockmgr)
 * locks, so we need not set up the deadlock checker. They are never added
 * to the ProcArray or the sinval messaging mechanism, either. They also
 * don't get a VXID assigned, since this is only useful when we actually
 * hold lockmgr locks.
 *
 * Startup process however uses locks but never waits for them in the
 * normal backend sense. Startup process also takes part in sinval messaging
 * as a sendOnly process, so never reads messages from sinval queue. So
 * Startup process does have a VXID and does show up in pg_locks.
 */
void
InitAuxiliaryProcess(void)
{
	PGPROC	   *auxproc;
	int			proctype;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * We use the ProcStructLock to protect assignment and releasing of
	 * AuxiliaryProcs entries.
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(ProcGlobal->spins_per_delay);

	/*
	 * Find a free auxproc ... *big* trouble if there isn't one ...
	 * A slot is free iff its pid is zero; the scan is safe because we hold
	 * ProcStructLock.
	 */
	for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
	{
		auxproc = &AuxiliaryProcs[proctype];
		if (auxproc->pid == 0)
			break;
	}
	if (proctype >= NUM_AUXILIARY_PROCS)
	{
		/* must release the spinlock before elog(FATAL) exits the process */
		SpinLockRelease(ProcStructLock);
		elog(FATAL, "all AuxiliaryProcs are in use");
	}

	/* Mark auxiliary proc as in use by me */
	/* use volatile pointer to prevent code rearrangement */
	((volatile PGPROC *) auxproc)->pid = MyProcPid;

	MyProc = auxproc;

	SpinLockRelease(ProcStructLock);

	/*
	 * Initialize all fields of MyProc, except for those previously
	 * initialized by InitProcGlobal.  Mirrors the corresponding sequence in
	 * InitProcess; keep the two in sync when adding PGPROC fields.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = PROC_WAIT_STATUS_OK;
	MyProc->lxid = InvalidLocalTransactionId;
	MyProc->fpVXIDLock = false;
	MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->backendId = InvalidBackendId;
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->tempNamespaceId = InvalidOid;
	MyProc->isBackgroundWorker = IsBackgroundWorker;
	MyProc->delayChkpt = 0;
	MyProc->statusFlags = 0;
	MyProc->lwWaiting = false;
	MyProc->lwWaitMode = 0;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	pg_atomic_write_u64(&MyProc->waitStart, 0);
#ifdef USE_ASSERT_CHECKING
	{
		int			i;

		/* Last process should have released all locks. */
		for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
			Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
	}
#endif

	/*
	 * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
	 * on it. That allows us to repoint the process latch, which so far
	 * points to process local one, to the shared one.
	 */
	OwnLatch(&MyProc->procLatch);
	SwitchToSharedLatch();

	/* now that we have a proc, report wait events to shared memory */
	pgstat_set_wait_event_storage(&MyProc->wait_event_info);

	/* Check that group locking fields are in a proper initial state. */
	Assert(MyProc->lockGroupLeader == NULL);
	Assert(dlist_is_empty(&MyProc->lockGroupMembers));

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here. (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(MyProc->sem);

	/*
	 * Arrange to clean up at process exit.  The slot index is passed so
	 * AuxiliaryProcKill can release the right AuxiliaryProcs entry.
	 */
	on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
}
|
|
|
|
|
2010-01-23 17:37:12 +01:00
|
|
|
/*
|
2019-10-30 02:03:00 +01:00
|
|
|
* Used from bufmgr to share the value of the buffer that Startup waits on,
|
2010-01-23 17:37:12 +01:00
|
|
|
* or to reset the value to "not waiting" (-1). This allows processing
|
|
|
|
* of recovery conflicts for buffer pins. Set is made before backends look
|
|
|
|
* at this value, so locking not required, especially since the set is
|
|
|
|
* an atomic integer set operation.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
SetStartupBufferPinWaitBufId(int bufid)
|
|
|
|
{
|
|
|
|
/* use volatile pointer to prevent code rearrangement */
|
|
|
|
volatile PROC_HDR *procglobal = ProcGlobal;
|
|
|
|
|
|
|
|
procglobal->startupBufferPinWaitBufId = bufid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Used by backends when they receive a request to check for buffer pin waits.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
GetStartupBufferPinWaitBufId(void)
|
|
|
|
{
|
|
|
|
/* use volatile pointer to prevent code rearrangement */
|
|
|
|
volatile PROC_HDR *procglobal = ProcGlobal;
|
|
|
|
|
2011-08-02 19:23:52 +02:00
|
|
|
return procglobal->startupBufferPinWaitBufId;
|
2010-01-23 17:37:12 +01:00
|
|
|
}
|
|
|
|
|
2005-06-18 00:32:51 +02:00
|
|
|
/*
|
|
|
|
* Check whether there are at least N free PGPROC objects.
|
|
|
|
*
|
|
|
|
* Note: this is designed on the assumption that N will generally be small.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
HaveNFreeProcs(int n)
|
|
|
|
{
|
|
|
|
PGPROC *proc;
|
2005-10-15 04:49:52 +02:00
|
|
|
|
2005-06-18 00:32:51 +02:00
|
|
|
SpinLockAcquire(ProcStructLock);
|
|
|
|
|
2015-10-16 20:20:36 +02:00
|
|
|
proc = ProcGlobal->freeProcs;
|
2005-06-18 00:32:51 +02:00
|
|
|
|
2008-11-02 22:24:52 +01:00
|
|
|
while (n > 0 && proc != NULL)
|
2005-06-18 00:32:51 +02:00
|
|
|
{
|
2008-11-02 22:24:52 +01:00
|
|
|
proc = (PGPROC *) proc->links.next;
|
2005-06-18 00:32:51 +02:00
|
|
|
n--;
|
|
|
|
}
|
|
|
|
|
|
|
|
SpinLockRelease(ProcStructLock);
|
|
|
|
|
|
|
|
return (n <= 0);
|
|
|
|
}
|
|
|
|
|
Improve control logic for bgwriter hibernation mode.
Commit 6d90eaaa89a007e0d365f49d6436f35d2392cfeb added a hibernation mode
to the bgwriter to reduce the server's idle-power consumption. However,
its interaction with the detailed behavior of BgBufferSync's feedback
control loop wasn't very well thought out. That control loop depends
primarily on the rate of buffer allocation, not the rate of buffer
dirtying, so the hibernation mode has to be designed to operate only when
no new buffer allocations are happening. Also, the check for whether the
system is effectively idle was not quite right and would fail to detect
a constant low level of activity, thus allowing the bgwriter to go into
hibernation mode in a way that would let the cycle time vary quite a bit,
possibly further confusing the feedback loop. To fix, move the wakeup
support from MarkBufferDirty and SetBufferCommitInfoNeedsSave into
StrategyGetBuffer, and prevent the bgwriter from entering hibernation mode
unless no buffer allocations have happened recently.
In addition, fix the delaying logic to remove the problem of possibly not
responding to signals promptly, which was basically caused by trying to use
the process latch's is_set flag for multiple purposes. I can't prove it
but I'm suspicious that that hack was responsible for the intermittent
"postmaster does not shut down" failures we've been seeing in the buildfarm
lately. In any case it did nothing to improve the readability or
robustness of the code.
In passing, express the hibernation sleep time as a multiplier on
BgWriterDelay, not a constant. I'm not sure whether there's any value in
exposing the longer sleep time as an independently configurable setting,
but we can at least make it act like this for little extra code.
2012-05-10 05:36:01 +02:00
|
|
|
/*
|
|
|
|
* Check if the current process is awaiting a lock.
|
|
|
|
*/
|
2010-02-13 02:32:20 +01:00
|
|
|
bool
|
|
|
|
IsWaitingForLock(void)
|
|
|
|
{
|
|
|
|
if (lockAwaited == NULL)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2001-01-14 06:08:17 +01:00
|
|
|
/*
|
2012-04-18 17:17:30 +02:00
|
|
|
* Cancel any pending wait for lock, when aborting a transaction, and revert
|
|
|
|
* any strong lock count acquisition for a lock being acquired.
|
2001-01-14 06:08:17 +01:00
|
|
|
*
|
|
|
|
* (Normally, this would only happen if we accept a cancel/die
|
2012-04-18 17:17:30 +02:00
|
|
|
* interrupt while waiting; but an ereport(ERROR) before or during the lock
|
|
|
|
* wait is within the realm of possibility, too.)
|
2001-01-14 06:08:17 +01:00
|
|
|
*/
|
2008-01-26 20:55:08 +01:00
|
|
|
void
|
2012-04-18 17:17:30 +02:00
|
|
|
LockErrorCleanup(void)
|
2001-01-14 06:08:17 +01:00
|
|
|
{
|
2014-01-27 17:07:44 +01:00
|
|
|
LWLock *partitionLock;
|
2013-03-17 04:22:17 +01:00
|
|
|
DisableTimeoutParams timeouts[2];
|
2005-12-11 22:02:18 +01:00
|
|
|
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 16:08:45 +01:00
|
|
|
HOLD_INTERRUPTS();
|
|
|
|
|
2012-04-18 17:17:30 +02:00
|
|
|
AbortStrongLockAcquire();
|
|
|
|
|
2001-01-14 06:08:17 +01:00
|
|
|
/* Nothing to do if we weren't waiting for a lock */
|
2005-12-11 22:02:18 +01:00
|
|
|
if (lockAwaited == NULL)
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 16:08:45 +01:00
|
|
|
{
|
|
|
|
RESUME_INTERRUPTS();
|
2008-01-26 20:55:08 +01:00
|
|
|
return;
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 16:08:45 +01:00
|
|
|
}
|
2001-01-16 21:59:34 +01:00
|
|
|
|
2013-03-17 04:22:17 +01:00
|
|
|
/*
|
|
|
|
* Turn off the deadlock and lock timeout timers, if they are still
|
|
|
|
* running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
|
|
|
|
* indicator flag, since this function is executed before
|
|
|
|
* ProcessInterrupts when responding to SIGINT; else we'd lose the
|
|
|
|
* knowledge that the SIGINT came from a lock timeout and not an external
|
|
|
|
* source.
|
|
|
|
*/
|
|
|
|
timeouts[0].id = DEADLOCK_TIMEOUT;
|
|
|
|
timeouts[0].keep_indicator = false;
|
|
|
|
timeouts[1].id = LOCK_TIMEOUT;
|
|
|
|
timeouts[1].keep_indicator = true;
|
|
|
|
disable_timeouts(timeouts, 2);
|
2001-01-14 06:08:17 +01:00
|
|
|
|
|
|
|
/* Unlink myself from the wait queue, if on it (might not be anymore!) */
|
2006-07-24 01:08:46 +02:00
|
|
|
partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
|
2005-12-11 22:02:18 +01:00
|
|
|
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
|
2004-07-17 05:32:14 +02:00
|
|
|
|
2008-11-02 22:24:52 +01:00
|
|
|
if (MyProc->links.next != NULL)
|
2004-07-17 05:32:14 +02:00
|
|
|
{
|
|
|
|
/* We could not have been granted the lock yet */
|
2006-07-24 01:08:46 +02:00
|
|
|
RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
|
2004-07-17 05:32:14 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Somebody kicked us off the lock queue already. Perhaps they
|
|
|
|
* granted us the lock, or perhaps they detected a deadlock. If they
|
|
|
|
* did grant us the lock, we'd better remember it in our local lock
|
2004-08-27 19:07:42 +02:00
|
|
|
* table.
|
2004-07-17 05:32:14 +02:00
|
|
|
*/
|
2020-06-17 09:14:37 +02:00
|
|
|
if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
|
2004-08-27 19:07:42 +02:00
|
|
|
GrantAwaitedLock();
|
2004-07-17 05:32:14 +02:00
|
|
|
}
|
|
|
|
|
2005-12-11 22:02:18 +01:00
|
|
|
lockAwaited = NULL;
|
2004-07-17 05:32:14 +02:00
|
|
|
|
2005-12-11 22:02:18 +01:00
|
|
|
LWLockRelease(partitionLock);
|
2000-02-21 03:42:37 +01:00
|
|
|
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 16:08:45 +01:00
|
|
|
RESUME_INTERRUPTS();
|
2000-02-21 03:42:37 +01:00
|
|
|
}
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2001-01-14 06:08:17 +01:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-12-22 01:51:54 +01:00
|
|
|
* ProcReleaseLocks() -- release locks associated with current transaction
|
2004-07-17 05:32:14 +02:00
|
|
|
* at main transaction commit or abort
|
2004-07-01 02:52:04 +02:00
|
|
|
*
|
Overdue code review for transaction-level advisory locks patch.
Commit 62c7bd31c8878dd45c9b9b2429ab7a12103f3590 had assorted problems, most
visibly that it broke PREPARE TRANSACTION in the presence of session-level
advisory locks (which should be ignored by PREPARE), as per a recent
complaint from Stephen Rees. More abstractly, the patch made the
LockMethodData.transactional flag not merely useless but outright
dangerous, because in point of fact that flag no longer tells you anything
at all about whether a lock is held transactionally. This fix therefore
removes that flag altogether. We now rely entirely on the convention
already in use in lock.c that transactional lock holds must be owned by
some ResourceOwner, while session holds are never so owned. Setting the
locallock struct's owner link to NULL thus denotes a session hold, and
there is no redundant marker for that.
PREPARE TRANSACTION now works again when there are session-level advisory
locks, and it is also able to transfer transactional advisory locks to the
prepared transaction, but for implementation reasons it throws an error if
we hold both types of lock on a single lockable object. Perhaps it will be
worth improving that someday.
Assorted other minor cleanup and documentation editing, as well.
Back-patch to 9.1, except that in the 9.1 branch I did not remove the
LockMethodData.transactional flag for fear of causing an ABI break for
any external code that might be examining those structs.
2012-05-04 23:43:27 +02:00
|
|
|
* At main transaction commit, we release standard locks except session locks.
|
2010-02-08 05:33:55 +01:00
|
|
|
* At main transaction abort, we release all locks including session locks.
|
2004-07-01 02:52:04 +02:00
|
|
|
*
|
Overdue code review for transaction-level advisory locks patch.
Commit 62c7bd31c8878dd45c9b9b2429ab7a12103f3590 had assorted problems, most
visibly that it broke PREPARE TRANSACTION in the presence of session-level
advisory locks (which should be ignored by PREPARE), as per a recent
complaint from Stephen Rees. More abstractly, the patch made the
LockMethodData.transactional flag not merely useless but outright
dangerous, because in point of fact that flag no longer tells you anything
at all about whether a lock is held transactionally. This fix therefore
removes that flag altogether. We now rely entirely on the convention
already in use in lock.c that transactional lock holds must be owned by
some ResourceOwner, while session holds are never so owned. Setting the
locallock struct's owner link to NULL thus denotes a session hold, and
there is no redundant marker for that.
PREPARE TRANSACTION now works again when there are session-level advisory
locks, and it is also able to transfer transactional advisory locks to the
prepared transaction, but for implementation reasons it throws an error if
we hold both types of lock on a single lockable object. Perhaps it will be
worth improving that someday.
Assorted other minor cleanup and documentation editing, as well.
Back-patch to 9.1, except that in the 9.1 branch I did not remove the
LockMethodData.transactional flag for fear of causing an ABI break for
any external code that might be examining those structs.
2012-05-04 23:43:27 +02:00
|
|
|
* Advisory locks are released only if they are transaction-level;
|
|
|
|
* session-level holds remain, whether this is a commit or not.
|
|
|
|
*
|
2004-07-01 02:52:04 +02:00
|
|
|
* At subtransaction commit, we don't release any locks (so this func is not
|
2004-07-17 05:32:14 +02:00
|
|
|
* needed at all); we will defer the releasing to the parent transaction.
|
2004-07-01 02:52:04 +02:00
|
|
|
* At subtransaction abort, we release all locks held by the subtransaction;
|
2004-07-17 05:32:14 +02:00
|
|
|
* this is implemented by retail releasing of the locks under control of
|
|
|
|
* the ResourceOwner mechanism.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
void
|
2004-07-17 05:32:14 +02:00
|
|
|
ProcReleaseLocks(bool isCommit)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
|
|
|
if (!MyProc)
|
|
|
|
return;
|
2001-01-14 06:08:17 +01:00
|
|
|
/* If waiting, get off wait queue (should only be needed after error) */
|
2012-04-18 17:17:30 +02:00
|
|
|
LockErrorCleanup();
|
Overdue code review for transaction-level advisory locks patch.
Commit 62c7bd31c8878dd45c9b9b2429ab7a12103f3590 had assorted problems, most
visibly that it broke PREPARE TRANSACTION in the presence of session-level
advisory locks (which should be ignored by PREPARE), as per a recent
complaint from Stephen Rees. More abstractly, the patch made the
LockMethodData.transactional flag not merely useless but outright
dangerous, because in point of fact that flag no longer tells you anything
at all about whether a lock is held transactionally. This fix therefore
removes that flag altogether. We now rely entirely on the convention
already in use in lock.c that transactional lock holds must be owned by
some ResourceOwner, while session holds are never so owned. Setting the
locallock struct's owner link to NULL thus denotes a session hold, and
there is no redundant marker for that.
PREPARE TRANSACTION now works again when there are session-level advisory
locks, and it is also able to transfer transactional advisory locks to the
prepared transaction, but for implementation reasons it throws an error if
we hold both types of lock on a single lockable object. Perhaps it will be
worth improving that someday.
Assorted other minor cleanup and documentation editing, as well.
Back-patch to 9.1, except that in the 9.1 branch I did not remove the
LockMethodData.transactional flag for fear of causing an ABI break for
any external code that might be examining those structs.
2012-05-04 23:43:27 +02:00
|
|
|
/* Release standard locks, including session-level if aborting */
|
2004-08-27 19:07:42 +02:00
|
|
|
LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
|
Overdue code review for transaction-level advisory locks patch.
Commit 62c7bd31c8878dd45c9b9b2429ab7a12103f3590 had assorted problems, most
visibly that it broke PREPARE TRANSACTION in the presence of session-level
advisory locks (which should be ignored by PREPARE), as per a recent
complaint from Stephen Rees. More abstractly, the patch made the
LockMethodData.transactional flag not merely useless but outright
dangerous, because in point of fact that flag no longer tells you anything
at all about whether a lock is held transactionally. This fix therefore
removes that flag altogether. We now rely entirely on the convention
already in use in lock.c that transactional lock holds must be owned by
some ResourceOwner, while session holds are never so owned. Setting the
locallock struct's owner link to NULL thus denotes a session hold, and
there is no redundant marker for that.
PREPARE TRANSACTION now works again when there are session-level advisory
locks, and it is also able to transfer transactional advisory locks to the
prepared transaction, but for implementation reasons it throws an error if
we hold both types of lock on a single lockable object. Perhaps it will be
worth improving that someday.
Assorted other minor cleanup and documentation editing, as well.
Back-patch to 9.1, except that in the 9.1 branch I did not remove the
LockMethodData.transactional flag for fear of causing an ABI break for
any external code that might be examining those structs.
2012-05-04 23:43:27 +02:00
|
|
|
/* Release transaction-level advisory locks */
|
2011-02-18 06:04:34 +01:00
|
|
|
LockReleaseAll(USER_LOCKMETHOD, false);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-01-04 22:06:32 +01:00
|
|
|
/*
|
|
|
|
* RemoveProcFromArray() -- Remove this process from the shared ProcArray.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
RemoveProcFromArray(int code, Datum arg)
|
|
|
|
{
|
|
|
|
Assert(MyProc != NULL);
|
2007-09-08 22:31:15 +02:00
|
|
|
ProcArrayRemove(MyProc, InvalidTransactionId);
|
2006-01-04 22:06:32 +01:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
|
|
|
* ProcKill() -- Destroy the per-proc data structure for
|
2001-09-29 06:02:27 +02:00
|
|
|
* this process. Release any of its held LW locks.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
static void
|
2003-12-12 19:45:10 +01:00
|
|
|
ProcKill(int code, Datum arg)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2014-02-01 03:31:08 +01:00
|
|
|
PGPROC *proc;
|
2015-07-28 20:51:57 +02:00
|
|
|
PGPROC *volatile *procgloballist;
|
2001-12-28 19:16:43 +01:00
|
|
|
|
2001-09-07 02:27:30 +02:00
|
|
|
Assert(MyProc != NULL);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-08-10 18:20:30 +02:00
|
|
|
/* Make sure we're out of the sync rep lists */
|
|
|
|
SyncRepCleanupAtProcExit();
|
|
|
|
|
2011-11-02 03:44:54 +01:00
|
|
|
#ifdef USE_ASSERT_CHECKING
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Last process should have released all locks. */
|
|
|
|
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
|
|
|
|
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2002-09-25 22:31:40 +02:00
|
|
|
/*
|
2005-08-08 05:12:16 +02:00
|
|
|
* Release any LW locks I am holding. There really shouldn't be any, but
|
|
|
|
* it's cheap to check again before we cut the knees off the LWLock
|
|
|
|
* facility by releasing our PGPROC ...
|
2002-09-25 22:31:40 +02:00
|
|
|
*/
|
2005-08-08 05:12:16 +02:00
|
|
|
LWLockReleaseAll();
|
2001-01-14 06:08:17 +01:00
|
|
|
|
2016-11-22 20:26:40 +01:00
|
|
|
/* Cancel any pending condition variable sleep, too */
|
|
|
|
ConditionVariableCancelSleep();
|
|
|
|
|
2016-02-07 16:16:13 +01:00
|
|
|
/*
|
|
|
|
* Detach from any lock group of which we are a member. If the leader
|
2020-02-07 04:41:10 +01:00
|
|
|
* exist before all other group members, its PGPROC will remain allocated
|
2016-02-07 16:16:13 +01:00
|
|
|
* until the last group process exits; that process must return the
|
|
|
|
* leader's PGPROC to the appropriate list.
|
|
|
|
*/
|
|
|
|
if (MyProc->lockGroupLeader != NULL)
|
|
|
|
{
|
|
|
|
PGPROC *leader = MyProc->lockGroupLeader;
|
|
|
|
LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
|
|
|
|
|
|
|
|
LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
|
|
|
|
Assert(!dlist_is_empty(&leader->lockGroupMembers));
|
|
|
|
dlist_delete(&MyProc->lockGroupLink);
|
|
|
|
if (dlist_is_empty(&leader->lockGroupMembers))
|
|
|
|
{
|
|
|
|
leader->lockGroupLeader = NULL;
|
|
|
|
if (leader != MyProc)
|
|
|
|
{
|
|
|
|
procgloballist = leader->procgloballist;
|
|
|
|
|
|
|
|
/* Leader exited first; return its PGPROC. */
|
|
|
|
SpinLockAcquire(ProcStructLock);
|
|
|
|
leader->links.next = (SHM_QUEUE *) *procgloballist;
|
|
|
|
*procgloballist = leader;
|
|
|
|
SpinLockRelease(ProcStructLock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (leader != MyProc)
|
|
|
|
MyProc->lockGroupLeader = NULL;
|
|
|
|
LWLockRelease(leader_lwlock);
|
|
|
|
}
|
|
|
|
|
2014-02-01 03:31:08 +01:00
|
|
|
/*
|
2015-01-14 18:45:22 +01:00
|
|
|
* Reset MyLatch to the process local one. This is so that signal
|
|
|
|
* handlers et al can continue using the latch after the shared latch
|
Improve efficiency of wait event reporting, remove proc.h dependency.
pgstat_report_wait_start() and pgstat_report_wait_end() required two
conditional branches so far. One to check if MyProc is NULL, the other to
check if pgstat_track_activities is set. As wait events are used around
comparatively lightweight operations, and are inlined (reducing branch
predictor effectiveness), that's not great.
The dependency on MyProc has a second disadvantage: Low-level subsystems, like
storage/file/fd.c, report wait events, but architecturally it is preferable
for them to not depend on inter-process subsystems like proc.h (defining
PGPROC). After this change including pgstat.h (nor obviously its
sub-components like backend_status.h, wait_event.h, ...) does not pull in IPC
related headers anymore.
These goals, efficiency and abstraction, are achieved by having
pgstat_report_wait_start/end() not interact with MyProc, but instead a new
my_wait_event_info variable. At backend startup it points to a local variable,
removing the need to check for MyProc being NULL. During process
initialization my_wait_event_info is redirected to MyProc->wait_event_info. At
shutdown this is reversed. Because wait event reporting now does not need to
know about where the wait event is stored, it does not need to know about
PGPROC anymore.
The removal of the branch for checking pgstat_track_activities is simpler:
Don't check anymore. The cost due to the branch are often higher than the
store - and even if not, pgstat_track_activities is rarely disabled.
The main motivator to commit this work now is that removing the (indirect)
pgproc.h include from pgstat.h simplifies a patch to move statistics reporting
to shared memory (which still has a chance to get into 14).
Author: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/20210402194458.2vu324hkk2djq6ce@alap3.anarazel.de
2021-04-03 20:44:47 +02:00
|
|
|
* isn't ours anymore.
|
|
|
|
*
|
|
|
|
* Similarly, stop reporting wait events to MyProc->wait_event_info.
|
|
|
|
*
|
|
|
|
* After that clear MyProc and disown the shared latch.
|
2014-02-01 03:31:08 +01:00
|
|
|
*/
|
2015-01-14 18:45:22 +01:00
|
|
|
SwitchBackToLocalLatch();
|
Improve efficiency of wait event reporting, remove proc.h dependency.
pgstat_report_wait_start() and pgstat_report_wait_end() required two
conditional branches so far. One to check if MyProc is NULL, the other to
check if pgstat_track_activities is set. As wait events are used around
comparatively lightweight operations, and are inlined (reducing branch
predictor effectiveness), that's not great.
The dependency on MyProc has a second disadvantage: Low-level subsystems, like
storage/file/fd.c, report wait events, but architecturally it is preferable
for them to not depend on inter-process subsystems like proc.h (defining
PGPROC). After this change including pgstat.h (nor obviously its
sub-components like backend_status.h, wait_event.h, ...) does not pull in IPC
related headers anymore.
These goals, efficiency and abstraction, are achieved by having
pgstat_report_wait_start/end() not interact with MyProc, but instead a new
my_wait_event_info variable. At backend startup it points to a local variable,
removing the need to check for MyProc being NULL. During process
initialization my_wait_event_info is redirected to MyProc->wait_event_info. At
shutdown this is reversed. Because wait event reporting now does not need to
know about where the wait event is stored, it does not need to know about
PGPROC anymore.
The removal of the branch for checking pgstat_track_activities is simpler:
Don't check anymore. The cost due to the branch are often higher than the
store - and even if not, pgstat_track_activities is rarely disabled.
The main motivator to commit this work now is that removing the (indirect)
pgproc.h include from pgstat.h simplifies a patch to move statistics reporting
to shared memory (which still has a chance to get into 14).
Author: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/20210402194458.2vu324hkk2djq6ce@alap3.anarazel.de
2021-04-03 20:44:47 +02:00
|
|
|
pgstat_reset_wait_event_storage();
|
|
|
|
|
2014-02-01 03:31:08 +01:00
|
|
|
proc = MyProc;
|
|
|
|
MyProc = NULL;
|
|
|
|
DisownLatch(&proc->procLatch);
|
2011-08-10 18:20:30 +02:00
|
|
|
|
2015-07-28 20:51:57 +02:00
|
|
|
procgloballist = proc->procgloballist;
|
2001-09-29 06:02:27 +02:00
|
|
|
SpinLockAcquire(ProcStructLock);
|
2001-06-17 00:58:17 +02:00
|
|
|
|
2016-02-07 16:16:13 +01:00
|
|
|
/*
|
|
|
|
* If we're still a member of a locking group, that means we're a leader
|
|
|
|
* which has somehow exited before its children. The last remaining child
|
|
|
|
* will release our PGPROC. Otherwise, release it now.
|
|
|
|
*/
|
|
|
|
if (proc->lockGroupLeader == NULL)
|
|
|
|
{
|
|
|
|
/* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
|
|
|
|
Assert(dlist_is_empty(&proc->lockGroupMembers));
|
|
|
|
|
|
|
|
/* Return PGPROC structure (and semaphore) to appropriate freelist */
|
|
|
|
proc->links.next = (SHM_QUEUE *) *procgloballist;
|
|
|
|
*procgloballist = proc;
|
|
|
|
}
|
2001-06-17 00:58:17 +02:00
|
|
|
|
2005-10-11 22:41:32 +02:00
|
|
|
/* Update shared estimate of spins_per_delay */
|
2015-10-16 20:20:36 +02:00
|
|
|
ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
|
2005-10-11 22:41:32 +02:00
|
|
|
|
2001-09-29 06:02:27 +02:00
|
|
|
SpinLockRelease(ProcStructLock);
|
2007-04-16 20:30:04 +02:00
|
|
|
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
/*
|
|
|
|
* This process is no longer present in shared memory in any meaningful
|
|
|
|
* way, so tell the postmaster we've cleaned up acceptably well. (XXX
|
2009-08-31 21:41:00 +02:00
|
|
|
* autovac launcher should be included here someday)
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
*/
|
2009-08-31 21:41:00 +02:00
|
|
|
if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
|
Install a "dead man switch" to allow the postmaster to detect cases where
a backend has done exit(0) or exit(1) without having disengaged itself
from shared memory. We are at risk for this whenever third-party code is
loaded into a backend, since such code might not know it's supposed to go
through proc_exit() instead. Also, it is reported that under Windows
there are ways to externally kill a process that cause the status code
returned to the postmaster to be indistinguishable from a voluntary exit
(thank you, Microsoft). If this does happen then the system is probably
hosed --- for instance, the dead session might still be holding locks.
So the best recovery method is to treat this like a backend crash.
The dead man switch is armed for a particular child process when it
acquires a regular PGPROC, and disarmed when the PGPROC is released;
these should be the first and last touches of shared memory resources
in a backend, or close enough anyway. This choice means there is no
coverage for auxiliary processes, but I doubt we need that, since they
shouldn't be executing any user-provided code anyway.
This patch also improves the management of the EXEC_BACKEND
ShmemBackendArray array a bit, by reducing search costs.
Although this problem is of long standing, the lack of field complaints
seems to mean it's not critical enough to risk back-patching; at least
not till we get some more testing of this mechanism.
2009-05-05 21:59:00 +02:00
|
|
|
MarkPostmasterChildInactive();
|
|
|
|
|
2007-04-16 20:30:04 +02:00
|
|
|
/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
|
|
|
|
if (AutovacuumLauncherPid != 0)
|
2009-08-31 21:41:00 +02:00
|
|
|
kill(AutovacuumLauncherPid, SIGUSR2);
|
2001-09-29 06:02:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
 *		processes (bgwriter, etc).  The PGPROC and sema are not released, only
 *		marked as not-in-use.
 *
 * Runs as an on_shmem_exit-style callback: 'code' is the exit code and
 * 'arg' carries the index of this process's slot in AuxiliaryProcs[].
 */
static void
AuxiliaryProcKill(int code, Datum arg)
{
	int			proctype = DatumGetInt32(arg);
	PGPROC	   *auxproc PG_USED_FOR_ASSERTS_ONLY;
	PGPROC	   *proc;

	/* 'arg' must identify a valid auxiliary-process slot */
	Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);

	auxproc = &AuxiliaryProcs[proctype];

	/* We can only be tearing down our own PGPROC */
	Assert(MyProc == auxproc);

	/* Release any LW locks I am holding (see notes above) */
	LWLockReleaseAll();

	/* Cancel any pending condition variable sleep, too */
	ConditionVariableCancelSleep();

	/* look at the equivalent ProcKill() code for comments */
	SwitchBackToLocalLatch();
	pgstat_reset_wait_event_storage();

	/*
	 * Detach from the shared PGPROC: save the pointer locally and clear
	 * MyProc *before* disowning its latch, mirroring ProcKill's ordering.
	 */
	proc = MyProc;
	MyProc = NULL;
	DisownLatch(&proc->procLatch);

	SpinLockAcquire(ProcStructLock);

	/* Mark auxiliary proc no longer in use */
	proc->pid = 0;

	/* Update shared estimate of spins_per_delay */
	ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);

	SpinLockRelease(ProcStructLock);
}
|
|
|
|
|
2017-03-27 04:02:22 +02:00
|
|
|
/*
|
|
|
|
* AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
|
|
|
|
* given its PID
|
|
|
|
*
|
|
|
|
* Returns NULL if not found.
|
|
|
|
*/
|
|
|
|
PGPROC *
|
|
|
|
AuxiliaryPidGetProc(int pid)
|
|
|
|
{
|
|
|
|
PGPROC *result = NULL;
|
|
|
|
int index;
|
|
|
|
|
|
|
|
if (pid == 0) /* never match dummy PGPROCs */
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
|
|
|
|
{
|
|
|
|
PGPROC *proc = &AuxiliaryProcs[index];
|
|
|
|
|
|
|
|
if (proc->pid == pid)
|
|
|
|
{
|
|
|
|
result = proc;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
2001-06-17 00:58:17 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
|
|
|
* ProcQueue package: routines for putting processes to sleep
|
|
|
|
* and waking them up
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue
 * Side Effects: Initializes the queue if it wasn't there before
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(const char *name)
{
	bool		existed;
	PROC_QUEUE *q;

	/* Attach to (or create) the named shmem structure. */
	q = (PROC_QUEUE *) ShmemInitStruct(name, sizeof(PROC_QUEUE), &existed);

	/* First attacher is responsible for initializing it. */
	if (!existed)
		ProcQueueInit(q);

	return q;
}
#endif
|
1996-07-09 08:22:35 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ProcQueueInit -- initialize a shared memory process queue
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ProcQueueInit(PROC_QUEUE *queue)
|
|
|
|
{
|
|
|
|
SHMQueueInit(&(queue->links));
|
|
|
|
queue->size = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2005-12-11 22:02:18 +01:00
|
|
|
* ProcSleep -- put a process to sleep on the specified lock
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2001-01-25 04:31:16 +01:00
|
|
|
* Caller must have set MyProc->heldLocks to reflect locks already held
|
|
|
|
* on the lockable object by this process (under all XIDs).
|
2001-01-14 06:08:17 +01:00
|
|
|
*
|
2005-12-11 22:02:18 +01:00
|
|
|
* The lock table's partition lock must be held at entry, and will be held
|
2001-01-14 06:08:17 +01:00
|
|
|
* at exit.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2020-06-17 09:14:37 +02:00
|
|
|
* Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR if not (deadlock).
|
2000-11-29 00:27:57 +01:00
|
|
|
*
|
1996-07-09 08:22:35 +02:00
|
|
|
* ASSUME: that no one will fiddle with the queue until after
|
2005-12-11 22:02:18 +01:00
|
|
|
* we release the partition lock.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* NOTES: The process queue is now a priority queue for locking.
|
|
|
|
*/
|
2020-06-17 09:14:37 +02:00
|
|
|
ProcWaitStatus
|
2005-12-11 22:02:18 +01:00
|
|
|
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2005-12-11 22:02:18 +01:00
|
|
|
LOCKMODE lockmode = locallock->tag.mode;
|
|
|
|
LOCK *lock = locallock->lock;
|
|
|
|
PROCLOCK *proclock = locallock->proclock;
|
2006-07-24 01:08:46 +02:00
|
|
|
uint32 hashcode = locallock->hashcode;
|
2014-01-27 17:07:44 +01:00
|
|
|
LWLock *partitionLock = LockHashPartitionLock(hashcode);
|
2001-01-25 04:31:16 +01:00
|
|
|
PROC_QUEUE *waitQueue = &(lock->waitProcs);
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
LOCKMASK myHeldLocks = MyProc->heldLocks;
|
2021-01-07 16:47:03 +01:00
|
|
|
TimestampTz standbyWaitStart = 0;
|
2001-09-04 04:26:57 +02:00
|
|
|
bool early_deadlock = false;
|
2007-10-26 22:45:10 +02:00
|
|
|
bool allow_autovacuum_cancel = true;
|
2021-01-07 16:47:03 +01:00
|
|
|
bool logged_recovery_conflict = false;
|
2020-06-17 09:14:37 +02:00
|
|
|
ProcWaitStatus myWaitStatus;
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *proc;
|
2016-02-07 16:16:13 +01:00
|
|
|
PGPROC *leader = MyProc->lockGroupLeader;
|
2000-12-22 01:51:54 +01:00
|
|
|
int i;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2016-02-07 16:16:13 +01:00
|
|
|
/*
|
2016-02-21 11:12:02 +01:00
|
|
|
* If group locking is in use, locks held by members of my locking group
|
2020-03-20 03:50:56 +01:00
|
|
|
* need to be included in myHeldLocks. This is not required for relation
|
2020-03-21 04:18:06 +01:00
|
|
|
* extension or page locks which conflict among group members. However,
|
|
|
|
* including them in myHeldLocks will give group members the priority to
|
|
|
|
* get those locks as compared to other backends which are also trying to
|
|
|
|
* acquire those locks. OTOH, we can avoid giving priority to group
|
|
|
|
* members for that kind of locks, but there doesn't appear to be a clear
|
|
|
|
* advantage of the same.
|
2016-02-07 16:16:13 +01:00
|
|
|
*/
|
|
|
|
if (leader != NULL)
|
|
|
|
{
|
|
|
|
SHM_QUEUE *procLocks = &(lock->procLocks);
|
|
|
|
PROCLOCK *otherproclock;
|
|
|
|
|
|
|
|
otherproclock = (PROCLOCK *)
|
|
|
|
SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
|
|
|
|
while (otherproclock != NULL)
|
|
|
|
{
|
|
|
|
if (otherproclock->groupLeader == leader)
|
|
|
|
myHeldLocks |= otherproclock->holdMask;
|
|
|
|
otherproclock = (PROCLOCK *)
|
|
|
|
SHMQueueNext(procLocks, &otherproclock->lockLink,
|
|
|
|
offsetof(PROCLOCK, lockLink));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2001-01-25 04:31:16 +01:00
|
|
|
/*
|
|
|
|
* Determine where to add myself in the wait queue.
|
|
|
|
*
|
|
|
|
* Normally I should go at the end of the queue. However, if I already
|
|
|
|
* hold locks that conflict with the request of any previous waiter, put
|
|
|
|
* myself in the queue just in front of the first such waiter. This is not
|
|
|
|
* a necessary step, since deadlock detection would move me to before that
|
|
|
|
* waiter anyway; but it's relatively cheap to detect such a conflict
|
|
|
|
* immediately, and avoid delaying till deadlock timeout.
|
|
|
|
*
|
2001-01-26 19:23:12 +01:00
|
|
|
* Special case: if I find I should go in front of some waiter, check to
|
|
|
|
* see if I conflict with already-held locks or the requests before that
|
|
|
|
* waiter. If not, then just grant myself the requested lock immediately.
|
|
|
|
* This is the same as the test for immediate grant in LockAcquire, except
|
|
|
|
* we are only considering the part of the wait queue before my insertion
|
|
|
|
* point.
|
2001-01-25 04:31:16 +01:00
|
|
|
*/
|
|
|
|
if (myHeldLocks != 0)
|
1999-05-07 03:23:11 +02:00
|
|
|
{
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
LOCKMASK aheadRequests = 0;
|
2001-01-26 19:23:12 +01:00
|
|
|
|
2008-11-02 22:24:52 +01:00
|
|
|
proc = (PGPROC *) waitQueue->links.next;
|
2001-01-25 04:31:16 +01:00
|
|
|
for (i = 0; i < waitQueue->size; i++)
|
1999-05-07 03:23:11 +02:00
|
|
|
{
|
2016-02-07 16:16:13 +01:00
|
|
|
/*
|
|
|
|
* If we're part of the same locking group as this waiter, its
|
2016-02-21 11:12:02 +01:00
|
|
|
* locks neither conflict with ours nor contribute to
|
|
|
|
* aheadRequests.
|
2016-02-07 16:16:13 +01:00
|
|
|
*/
|
|
|
|
if (leader != NULL && leader == proc->lockGroupLeader)
|
|
|
|
{
|
|
|
|
proc = (PGPROC *) proc->links.next;
|
|
|
|
continue;
|
|
|
|
}
|
2001-01-25 04:31:16 +01:00
|
|
|
/* Must he wait for me? */
|
2002-07-19 01:06:20 +02:00
|
|
|
if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
|
1999-05-07 03:23:11 +02:00
|
|
|
{
|
2001-01-25 04:31:16 +01:00
|
|
|
/* Must I wait for him ? */
|
2002-07-19 01:06:20 +02:00
|
|
|
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
|
2001-01-25 04:31:16 +01:00
|
|
|
{
|
2001-09-04 04:26:57 +02:00
|
|
|
/*
|
|
|
|
* Yes, so we have a deadlock. Easiest way to clean up
|
|
|
|
* correctly is to call RemoveFromWaitQueue(), but we
|
|
|
|
* can't do that until we are *on* the wait queue. So, set
|
|
|
|
* a flag to check below, and break out of loop. Also,
|
2003-01-16 22:01:45 +01:00
|
|
|
* record deadlock info for later message.
|
2001-09-04 04:26:57 +02:00
|
|
|
*/
|
2003-01-16 22:01:45 +01:00
|
|
|
RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
|
2001-09-04 04:26:57 +02:00
|
|
|
early_deadlock = true;
|
|
|
|
break;
|
2001-01-25 04:31:16 +01:00
|
|
|
}
|
2001-01-26 19:23:12 +01:00
|
|
|
/* I must go before this waiter. Check special case. */
|
2002-07-19 01:06:20 +02:00
|
|
|
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
|
2019-12-29 09:09:20 +01:00
|
|
|
!LockCheckConflicts(lockMethodTable, lockmode, lock,
|
|
|
|
proclock))
|
2001-01-25 04:31:16 +01:00
|
|
|
{
|
2001-01-26 19:23:12 +01:00
|
|
|
/* Skip the wait and just grant myself the lock. */
|
2003-02-18 03:13:24 +01:00
|
|
|
GrantLock(lock, proclock, lockmode);
|
2004-08-27 19:07:42 +02:00
|
|
|
GrantAwaitedLock();
|
2020-06-17 09:14:37 +02:00
|
|
|
return PROC_WAIT_STATUS_OK;
|
2001-01-25 04:31:16 +01:00
|
|
|
}
|
|
|
|
/* Break out of loop to put myself before him */
|
1999-05-07 03:23:11 +02:00
|
|
|
break;
|
2001-01-25 04:31:16 +01:00
|
|
|
}
|
2001-01-26 19:23:12 +01:00
|
|
|
/* Nope, so advance to next waiter */
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
|
2008-11-02 22:24:52 +01:00
|
|
|
proc = (PGPROC *) proc->links.next;
|
1999-05-07 03:23:11 +02:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2001-01-26 19:23:12 +01:00
|
|
|
/*
|
|
|
|
* If we fall out of loop normally, proc points to waitQueue head, so
|
|
|
|
* we will insert at tail of queue as desired.
|
|
|
|
*/
|
2001-01-25 04:31:16 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* I hold no locks, so I can't push in front of anyone. */
|
2002-06-11 15:40:53 +02:00
|
|
|
proc = (PGPROC *) &(waitQueue->links);
|
1999-05-07 03:23:11 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2001-01-22 23:30:06 +01:00
|
|
|
* Insert self into queue, ahead of the given proc (or at tail of queue).
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2001-01-22 23:30:06 +01:00
|
|
|
SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
|
1998-01-27 04:00:43 +01:00
|
|
|
waitQueue->size++;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
lock->waitMask |= LOCKBIT_ON(lockmode);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2002-06-11 15:40:53 +02:00
|
|
|
/* Set up wait information in PGPROC object, too */
|
2001-01-22 23:30:06 +01:00
|
|
|
MyProc->waitLock = lock;
|
2004-08-27 19:07:42 +02:00
|
|
|
MyProc->waitProcLock = proclock;
|
2001-01-22 23:30:06 +01:00
|
|
|
MyProc->waitLockMode = lockmode;
|
|
|
|
|
2020-06-17 09:14:37 +02:00
|
|
|
MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
|
2001-09-04 04:26:57 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we detected deadlock, give up without waiting. This must agree with
|
2019-06-21 00:57:07 +02:00
|
|
|
* CheckDeadLock's recovery code.
|
2001-09-04 04:26:57 +02:00
|
|
|
*/
|
|
|
|
if (early_deadlock)
|
|
|
|
{
|
2006-07-24 01:08:46 +02:00
|
|
|
RemoveFromWaitQueue(MyProc, hashcode);
|
2020-06-17 09:14:37 +02:00
|
|
|
return PROC_WAIT_STATUS_ERROR;
|
2001-09-04 04:26:57 +02:00
|
|
|
}
|
2000-11-29 00:27:57 +01:00
|
|
|
|
2001-01-14 06:08:17 +01:00
|
|
|
/* mark that we are waiting for a lock */
|
2005-12-11 22:02:18 +01:00
|
|
|
lockAwaited = locallock;
|
2001-01-14 06:08:17 +01:00
|
|
|
|
|
|
|
/*
|
2005-12-11 22:02:18 +01:00
|
|
|
* Release the lock table's partition lock.
|
2001-01-14 06:08:17 +01:00
|
|
|
*
|
|
|
|
* NOTE: this may also cause us to exit critical-section state, possibly
|
|
|
|
* allowing a cancel/die interrupt to be accepted. This is OK because we
|
|
|
|
* have recorded the fact that we are waiting for a lock, and so
|
2012-04-18 17:17:30 +02:00
|
|
|
* LockErrorCleanup will clean up if cancel/die happens.
|
2001-01-14 06:08:17 +01:00
|
|
|
*/
|
2005-12-11 22:02:18 +01:00
|
|
|
LWLockRelease(partitionLock);
|
2000-12-18 18:33:42 +01:00
|
|
|
|
2011-08-02 21:16:29 +02:00
|
|
|
/*
|
|
|
|
* Also, now that we will successfully clean up after an ereport, it's
|
|
|
|
* safe to check to see if there's a buffer pin deadlock against the
|
|
|
|
* Startup process. Of course, that's only necessary if we're doing Hot
|
|
|
|
* Standby and are not the Startup process ourselves.
|
|
|
|
*/
|
|
|
|
if (RecoveryInProgress() && !InRecovery)
|
|
|
|
CheckRecoveryConflictDeadlock();
|
|
|
|
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-17 00:43:21 +02:00
|
|
|
/* Reset deadlock_state before enabling the timeout handler */
|
2007-06-19 22:13:22 +02:00
|
|
|
deadlock_state = DS_NOT_YET_CHECKED;
|
2015-02-03 23:24:38 +01:00
|
|
|
got_deadlock_timeout = false;
|
2007-06-19 22:13:22 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-11-29 00:27:57 +01:00
|
|
|
* Set timer so we can wake up after awhile and check for a deadlock. If a
|
2019-06-21 00:57:07 +02:00
|
|
|
* deadlock is detected, the handler sets MyProc->waitStatus =
|
2020-06-17 09:14:37 +02:00
|
|
|
* PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
|
2019-06-21 00:57:07 +02:00
|
|
|
* rather than success.
|
2000-11-29 00:27:57 +01:00
|
|
|
*
|
|
|
|
* By delaying the check until we've waited for a bit, we can avoid
|
|
|
|
* running the rather expensive deadlock-check code in most cases.
|
2013-03-17 04:22:17 +01:00
|
|
|
*
|
|
|
|
* If LockTimeout is set, also enable the timeout for that. We can save a
|
|
|
|
* few cycles by enabling both timeout sources in one call.
|
2016-03-10 20:26:24 +01:00
|
|
|
*
|
|
|
|
* If InHotStandby we set lock waits slightly later for clarity with other
|
|
|
|
* code.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2016-03-10 20:26:24 +01:00
|
|
|
if (!InHotStandby)
|
2013-03-17 04:22:17 +01:00
|
|
|
{
|
2016-03-10 20:26:24 +01:00
|
|
|
if (LockTimeout > 0)
|
|
|
|
{
|
|
|
|
EnableTimeoutParams timeouts[2];
|
|
|
|
|
|
|
|
timeouts[0].id = DEADLOCK_TIMEOUT;
|
|
|
|
timeouts[0].type = TMPARAM_AFTER;
|
|
|
|
timeouts[0].delay_ms = DeadlockTimeout;
|
|
|
|
timeouts[1].id = LOCK_TIMEOUT;
|
|
|
|
timeouts[1].type = TMPARAM_AFTER;
|
|
|
|
timeouts[1].delay_ms = LockTimeout;
|
|
|
|
enable_timeouts(timeouts, 2);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
|
Display the time when the process started waiting for the lock, in pg_locks, take 2
This commit adds new column "waitstart" into pg_locks view. This column
reports the time when the server process started waiting for the lock
if the lock is not held. This information is useful, for example, when
examining the amount of time to wait on a lock by subtracting
"waitstart" in pg_locks from the current time, and identify the lock
that the processes are waiting for very long.
This feature uses the current time obtained for the deadlock timeout
timer as "waitstart" (i.e., the time when this process started waiting
for the lock). Since getting the current time newly can cause overhead,
we reuse the already-obtained time to avoid that overhead.
Note that "waitstart" is updated without holding the lock table's
partition lock, to avoid the overhead by additional lock acquisition.
This can cause "waitstart" in pg_locks to become NULL for a very short
period of time after the wait started even though "granted" is false.
This is OK in practice because we can assume that users are likely to
look at "waitstart" when waiting for the lock for a long time.
The first attempt of this patch (commit 3b733fcd04) caused the buildfarm
member "rorqual" (built with --disable-atomics --disable-spinlocks) to report
the failure of the regression test. It was reverted by commit 890d2182a2.
The cause of this failure was that the atomic variable for "waitstart"
in the dummy process entry created at the end of prepare transaction was
not initialized. This second attempt fixes that issue.
Bump catalog version.
Author: Atsushi Torikoshi
Reviewed-by: Ian Lawrence Barwick, Robert Haas, Justin Pryzby, Fujii Masao
Discussion: https://postgr.es/m/a96013dc51cdc56b2a2b84fa8a16a993@oss.nttdata.com
2021-02-15 07:13:37 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the current time obtained for the deadlock timeout timer as
|
|
|
|
* waitStart (i.e., the time when this process started waiting for the
|
|
|
|
* lock). Since getting the current time newly can cause overhead, we
|
|
|
|
* reuse the already-obtained time to avoid that overhead.
|
|
|
|
*
|
|
|
|
* Note that waitStart is updated without holding the lock table's
|
|
|
|
* partition lock, to avoid the overhead by additional lock
|
|
|
|
* acquisition. This can cause "waitstart" in pg_locks to become NULL
|
|
|
|
* for a very short period of time after the wait started even though
|
|
|
|
* "granted" is false. This is OK in practice because we can assume
|
|
|
|
* that users are likely to look at "waitstart" when waiting for the
|
|
|
|
* lock for a long time.
|
|
|
|
*/
|
|
|
|
pg_atomic_write_u64(&MyProc->waitStart,
|
|
|
|
get_timeout_start_time(DEADLOCK_TIMEOUT));
|
2013-03-17 04:22:17 +01:00
|
|
|
}
|
2021-01-07 16:47:03 +01:00
|
|
|
else if (log_recovery_conflict_waits)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Set the wait start timestamp if logging is enabled and in hot
|
|
|
|
* standby.
|
|
|
|
*/
|
|
|
|
standbyWaitStart = GetCurrentTimestamp();
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-11-29 00:27:57 +01:00
|
|
|
/*
|
2015-02-03 23:24:38 +01:00
|
|
|
* If somebody wakes us between LWLockRelease and WaitLatch, the latch
|
|
|
|
* will not wait. But a set latch does not necessarily mean that the lock
|
|
|
|
* is free now, as there are many other sources for latch sets than
|
|
|
|
* somebody releasing the lock.
|
2001-01-14 06:08:17 +01:00
|
|
|
*
|
2015-02-03 23:24:38 +01:00
|
|
|
* We process interrupts whenever the latch has been set, so cancel/die
|
|
|
|
* interrupts are processed quickly. This means we must not mind losing
|
|
|
|
* control to a cancel/die interrupt here. We don't, because we have no
|
|
|
|
* shared-state-change work to do after being granted the lock (the
|
|
|
|
* grantor did it all). We do have to worry about canceling the deadlock
|
|
|
|
* timeout and updating the locallock table, but if we lose control to an
|
|
|
|
* error, LockErrorCleanup will fix that up.
|
2000-11-29 00:27:57 +01:00
|
|
|
*/
|
2006-04-14 05:38:56 +02:00
|
|
|
do
|
|
|
|
{
|
2016-03-10 20:26:24 +01:00
|
|
|
if (InHotStandby)
|
2015-02-03 23:24:38 +01:00
|
|
|
{
|
2021-01-07 16:47:03 +01:00
|
|
|
bool maybe_log_conflict =
|
|
|
|
(standbyWaitStart != 0 && !logged_recovery_conflict);
|
|
|
|
|
|
|
|
/* Set a timer and wait for that or for the lock to be granted */
|
|
|
|
ResolveRecoveryConflictWithLock(locallock->tag.lock,
|
|
|
|
maybe_log_conflict);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Emit the log message if the startup process is waiting longer
|
|
|
|
* than deadlock_timeout for recovery conflict on lock.
|
|
|
|
*/
|
|
|
|
if (maybe_log_conflict)
|
|
|
|
{
|
|
|
|
TimestampTz now = GetCurrentTimestamp();
|
|
|
|
|
|
|
|
if (TimestampDifferenceExceeds(standbyWaitStart, now,
|
|
|
|
DeadlockTimeout))
|
|
|
|
{
|
|
|
|
VirtualTransactionId *vxids;
|
|
|
|
int cnt;
|
|
|
|
|
|
|
|
vxids = GetLockConflicts(&locallock->tag.lock,
|
|
|
|
AccessExclusiveLock, &cnt);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Log the recovery conflict and the list of PIDs of
|
|
|
|
* backends holding the conflicting lock. Note that we do
|
|
|
|
* logging even if there are no such backends right now
|
|
|
|
* because the startup process here has already waited
|
|
|
|
* longer than deadlock_timeout.
|
|
|
|
*/
|
|
|
|
LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
|
2021-01-13 14:59:17 +01:00
|
|
|
standbyWaitStart, now,
|
|
|
|
cnt > 0 ? vxids : NULL, true);
|
2021-01-07 16:47:03 +01:00
|
|
|
logged_recovery_conflict = true;
|
|
|
|
}
|
|
|
|
}
|
2016-03-10 20:26:24 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
Add WL_EXIT_ON_PM_DEATH pseudo-event.
Users of the WaitEventSet and WaitLatch() APIs can now choose between
asking for WL_POSTMASTER_DEATH and then handling it explicitly, or asking
for WL_EXIT_ON_PM_DEATH to trigger immediate exit on postmaster death.
This reduces code duplication, since almost all callers want the latter.
Repair all code that was previously ignoring postmaster death completely,
or requesting the event but ignoring it, or requesting the event but then
doing an unconditional PostmasterIsAlive() call every time through its
event loop (which is an expensive syscall on platforms for which we don't
have USE_POSTMASTER_DEATH_SIGNAL support).
Assert that callers of WaitLatchXXX() under the postmaster remember to
ask for either WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH, to prevent
future bugs.
The only process that doesn't handle postmaster death is syslogger. It
waits until all backends holding the write end of the syslog pipe
(including the postmaster) have closed it by exiting, to be sure to
capture any parting messages. By using the WaitEventSet API directly
it avoids the new assertion, and as a by-product it may be slightly
more efficient on platforms that have epoll().
Author: Thomas Munro
Reviewed-by: Kyotaro Horiguchi, Heikki Linnakangas, Tom Lane
Discussion: https://postgr.es/m/CAEepm%3D1TCviRykkUb69ppWLr_V697rzd1j3eZsRMmbXvETfqbQ%40mail.gmail.com,
https://postgr.es/m/CAEepm=2LqHzizbe7muD7-2yHUbTOoF7Q+qkSD5Q41kuhttRTwA@mail.gmail.com
2018-11-23 08:16:41 +01:00
|
|
|
(void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
|
|
|
|
PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
|
2016-03-10 20:26:24 +01:00
|
|
|
ResetLatch(MyLatch);
|
|
|
|
/* check for deadlocks first, as that's probably log-worthy */
|
|
|
|
if (got_deadlock_timeout)
|
|
|
|
{
|
|
|
|
CheckDeadLock();
|
|
|
|
got_deadlock_timeout = false;
|
|
|
|
}
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
2015-02-03 23:24:38 +01:00
|
|
|
}
|
2007-06-19 22:13:22 +02:00
|
|
|
|
2007-08-28 05:23:44 +02:00
|
|
|
/*
|
2020-06-17 09:14:37 +02:00
|
|
|
* waitStatus could change from PROC_WAIT_STATUS_WAITING to something
|
2007-08-28 05:23:44 +02:00
|
|
|
* else asynchronously. Read it just once per loop to prevent
|
|
|
|
* surprising behavior (such as missing log messages).
|
|
|
|
*/
|
2020-06-17 09:14:37 +02:00
|
|
|
myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
|
2007-08-28 05:23:44 +02:00
|
|
|
|
2007-10-26 22:45:10 +02:00
|
|
|
/*
|
|
|
|
* If we are not deadlocked, but are waiting on an autovacuum-induced
|
|
|
|
* task, send a signal to interrupt it.
|
|
|
|
*/
|
|
|
|
if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
|
|
|
|
{
|
|
|
|
PGPROC *autovac = GetBlockingAutoVacuumPgproc();
|
2020-11-16 23:42:55 +01:00
|
|
|
uint8 statusFlags;
|
2020-11-23 22:55:23 +01:00
|
|
|
uint8 lockmethod_copy;
|
|
|
|
LOCKTAG locktag_copy;
|
2007-10-26 22:45:10 +02:00
|
|
|
|
2020-11-23 22:55:23 +01:00
|
|
|
/*
|
|
|
|
* Grab info we need, then release lock immediately. Note this
|
|
|
|
* coding means that there is a tiny chance that the process
|
|
|
|
* terminates its current transaction and starts a different one
|
|
|
|
 * before we have a chance to send the signal; the worst possible
|
|
|
|
* consequence is that a for-wraparound vacuum is cancelled. But
|
|
|
|
* that could happen in any case unless we were to do kill() with
|
|
|
|
* the lock held, which is much more undesirable.
|
|
|
|
*/
|
2007-10-26 22:45:10 +02:00
|
|
|
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
|
2020-11-23 22:55:23 +01:00
|
|
|
statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
|
|
|
|
lockmethod_copy = lock->tag.locktag_lockmethodid;
|
|
|
|
locktag_copy = lock->tag;
|
|
|
|
LWLockRelease(ProcArrayLock);
|
2007-10-26 22:45:10 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Only do it if the worker is not working to protect against Xid
|
|
|
|
* wraparound.
|
|
|
|
*/
|
2020-11-16 23:42:55 +01:00
|
|
|
if ((statusFlags & PROC_IS_AUTOVACUUM) &&
|
|
|
|
!(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
|
2007-10-26 22:45:10 +02:00
|
|
|
{
|
|
|
|
int pid = autovac->pid;
|
2012-07-26 15:18:32 +02:00
|
|
|
|
2020-11-23 22:55:23 +01:00
|
|
|
/* report the case, if configured to do so */
|
2020-11-24 01:04:07 +01:00
|
|
|
if (message_level_is_interesting(DEBUG1))
|
|
|
|
{
|
|
|
|
StringInfoData locktagbuf;
|
|
|
|
StringInfoData logbuf; /* errdetail for server log */
|
|
|
|
|
|
|
|
initStringInfo(&locktagbuf);
|
|
|
|
initStringInfo(&logbuf);
|
|
|
|
DescribeLockTag(&locktagbuf, &locktag_copy);
|
|
|
|
appendStringInfo(&logbuf,
|
2021-02-17 11:24:46 +01:00
|
|
|
"Process %d waits for %s on %s.",
|
2020-11-24 01:04:07 +01:00
|
|
|
MyProcPid,
|
|
|
|
GetLockmodeName(lockmethod_copy, lockmode),
|
|
|
|
locktagbuf.data);
|
|
|
|
|
|
|
|
ereport(DEBUG1,
|
2021-02-17 11:24:46 +01:00
|
|
|
(errmsg_internal("sending cancel to blocking autovacuum PID %d",
|
2020-11-24 01:04:07 +01:00
|
|
|
pid),
|
|
|
|
errdetail_log("%s", logbuf.data)));
|
|
|
|
|
|
|
|
pfree(locktagbuf.data);
|
|
|
|
pfree(logbuf.data);
|
|
|
|
}
|
2020-11-23 22:55:23 +01:00
|
|
|
|
|
|
|
/* send the autovacuum worker Back to Old Kent Road */
|
2007-10-26 22:45:10 +02:00
|
|
|
if (kill(pid, SIGINT) < 0)
|
|
|
|
{
|
2015-07-28 23:34:00 +02:00
|
|
|
/*
|
|
|
|
* There's a race condition here: once we release the
|
|
|
|
* ProcArrayLock, it's possible for the autovac worker to
|
|
|
|
* close up shop and exit before we can do the kill().
|
|
|
|
* Therefore, we do not whinge about no-such-process.
|
|
|
|
* Other errors such as EPERM could conceivably happen if
|
|
|
|
* the kernel recycles the PID fast enough, but such cases
|
|
|
|
* seem improbable enough that it's probably best to issue
|
|
|
|
* a warning if we see some other errno.
|
|
|
|
*/
|
|
|
|
if (errno != ESRCH)
|
|
|
|
ereport(WARNING,
|
|
|
|
(errmsg("could not send signal to process %d: %m",
|
|
|
|
pid)));
|
2007-10-26 22:45:10 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-10 08:50:19 +02:00
|
|
|
/* prevent signal from being sent again more than once */
|
2007-10-26 22:45:10 +02:00
|
|
|
allow_autovacuum_cancel = false;
|
|
|
|
}
|
|
|
|
|
2007-06-19 22:13:22 +02:00
|
|
|
/*
|
|
|
|
* If awoken after the deadlock check interrupt has run, and
|
|
|
|
* log_lock_waits is on, then report about the wait.
|
|
|
|
*/
|
2007-08-28 05:23:44 +02:00
|
|
|
if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
|
2007-06-19 22:13:22 +02:00
|
|
|
{
|
2014-03-12 19:26:47 +01:00
|
|
|
StringInfoData buf,
|
|
|
|
lock_waiters_sbuf,
|
|
|
|
lock_holders_sbuf;
|
2007-08-28 05:23:44 +02:00
|
|
|
const char *modename;
|
|
|
|
long secs;
|
|
|
|
int usecs;
|
|
|
|
long msecs;
|
2014-03-12 19:26:47 +01:00
|
|
|
SHM_QUEUE *procLocks;
|
|
|
|
PROCLOCK *proclock;
|
|
|
|
bool first_holder = true,
|
|
|
|
first_waiter = true;
|
|
|
|
int lockHoldersNum = 0;
|
2007-08-28 05:23:44 +02:00
|
|
|
|
|
|
|
initStringInfo(&buf);
|
2014-03-12 19:26:47 +01:00
|
|
|
initStringInfo(&lock_waiters_sbuf);
|
|
|
|
initStringInfo(&lock_holders_sbuf);
|
|
|
|
|
2007-08-28 05:23:44 +02:00
|
|
|
DescribeLockTag(&buf, &locallock->tag.lock);
|
|
|
|
modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
|
|
|
|
lockmode);
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-17 00:43:21 +02:00
|
|
|
TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
|
|
|
|
GetCurrentTimestamp(),
|
2007-08-28 05:23:44 +02:00
|
|
|
&secs, &usecs);
|
|
|
|
msecs = secs * 1000 + usecs / 1000;
|
|
|
|
usecs = usecs % 1000;
|
|
|
|
|
2014-03-12 19:26:47 +01:00
|
|
|
/*
|
|
|
|
* we loop over the lock's procLocks to gather a list of all
|
|
|
|
* holders and waiters. Thus we will be able to provide more
|
|
|
|
* detailed information for lock debugging purposes.
|
|
|
|
*
|
|
|
|
* lock->procLocks contains all processes which hold or wait for
|
|
|
|
* this lock.
|
|
|
|
*/
|
|
|
|
|
|
|
|
LWLockAcquire(partitionLock, LW_SHARED);
|
|
|
|
|
|
|
|
procLocks = &(lock->procLocks);
|
|
|
|
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
|
|
|
|
offsetof(PROCLOCK, lockLink));
|
|
|
|
|
|
|
|
while (proclock)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* we are a waiter if myProc->waitProcLock == proclock; we are
|
|
|
|
* a holder if it is NULL or something different
|
|
|
|
*/
|
|
|
|
if (proclock->tag.myProc->waitProcLock == proclock)
|
|
|
|
{
|
|
|
|
if (first_waiter)
|
|
|
|
{
|
|
|
|
appendStringInfo(&lock_waiters_sbuf, "%d",
|
|
|
|
proclock->tag.myProc->pid);
|
|
|
|
first_waiter = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
appendStringInfo(&lock_waiters_sbuf, ", %d",
|
|
|
|
proclock->tag.myProc->pid);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (first_holder)
|
|
|
|
{
|
|
|
|
appendStringInfo(&lock_holders_sbuf, "%d",
|
|
|
|
proclock->tag.myProc->pid);
|
|
|
|
first_holder = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
appendStringInfo(&lock_holders_sbuf, ", %d",
|
|
|
|
proclock->tag.myProc->pid);
|
|
|
|
|
|
|
|
lockHoldersNum++;
|
|
|
|
}
|
|
|
|
|
|
|
|
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
|
|
|
|
offsetof(PROCLOCK, lockLink));
|
|
|
|
}
|
|
|
|
|
|
|
|
LWLockRelease(partitionLock);
|
|
|
|
|
2007-08-28 05:23:44 +02:00
|
|
|
if (deadlock_state == DS_SOFT_DEADLOCK)
|
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
|
2014-03-12 19:26:47 +01:00
|
|
|
MyProcPid, modename, buf.data, msecs, usecs),
|
|
|
|
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
|
|
|
|
"Processes holding the lock: %s. Wait queue: %s.",
|
|
|
|
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
|
2007-08-28 05:23:44 +02:00
|
|
|
else if (deadlock_state == DS_HARD_DEADLOCK)
|
2007-06-19 22:13:22 +02:00
|
|
|
{
|
2007-08-28 05:23:44 +02:00
|
|
|
/*
|
|
|
|
* This message is a bit redundant with the error that will be
|
|
|
|
* reported subsequently, but in some cases the error report
|
|
|
|
* might not make it to the log (eg, if it's caught by an
|
|
|
|
* exception handler), and we want to ensure all long-wait
|
|
|
|
* events get logged.
|
|
|
|
*/
|
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
|
2014-03-12 19:26:47 +01:00
|
|
|
MyProcPid, modename, buf.data, msecs, usecs),
|
|
|
|
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
|
|
|
|
"Processes holding the lock: %s. Wait queue: %s.",
|
|
|
|
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
|
2007-06-19 22:13:22 +02:00
|
|
|
}
|
2007-08-28 05:23:44 +02:00
|
|
|
|
2020-06-17 09:14:37 +02:00
|
|
|
if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
|
2007-08-28 05:23:44 +02:00
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
|
2014-03-12 19:26:47 +01:00
|
|
|
MyProcPid, modename, buf.data, msecs, usecs),
|
|
|
|
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
|
|
|
|
"Processes holding the lock: %s. Wait queue: %s.",
|
|
|
|
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
|
2020-06-17 09:14:37 +02:00
|
|
|
else if (myWaitStatus == PROC_WAIT_STATUS_OK)
|
2007-08-28 05:23:44 +02:00
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("process %d acquired %s on %s after %ld.%03d ms",
|
|
|
|
MyProcPid, modename, buf.data, msecs, usecs)));
|
|
|
|
else
|
|
|
|
{
|
2020-06-17 09:14:37 +02:00
|
|
|
Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2007-08-28 05:23:44 +02:00
|
|
|
/*
|
|
|
|
* Currently, the deadlock checker always kicks its own
|
2020-06-17 09:14:37 +02:00
|
|
|
* process, which means that we'll only see
|
|
|
|
* PROC_WAIT_STATUS_ERROR when deadlock_state ==
|
2007-08-28 05:23:44 +02:00
|
|
|
* DS_HARD_DEADLOCK, and there's no need to print redundant
|
|
|
|
* messages. But for completeness and future-proofing, print
|
|
|
|
* a message if it looks like someone else kicked us off the
|
|
|
|
* lock.
|
|
|
|
*/
|
|
|
|
if (deadlock_state != DS_HARD_DEADLOCK)
|
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
|
2014-03-12 19:26:47 +01:00
|
|
|
MyProcPid, modename, buf.data, msecs, usecs),
|
|
|
|
(errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
|
|
|
|
"Processes holding the lock: %s. Wait queue: %s.",
|
|
|
|
lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
|
2007-08-28 05:23:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point we might still need to wait for the lock. Reset
|
|
|
|
* state so we don't print the above messages again.
|
|
|
|
*/
|
|
|
|
deadlock_state = DS_NO_DEADLOCK;
|
|
|
|
|
|
|
|
pfree(buf.data);
|
2014-03-12 19:26:47 +01:00
|
|
|
pfree(lock_holders_sbuf.data);
|
|
|
|
pfree(lock_waiters_sbuf.data);
|
2007-06-19 22:13:22 +02:00
|
|
|
}
|
2020-06-17 09:14:37 +02:00
|
|
|
} while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
|
1998-02-26 05:46:47 +01:00
|
|
|
|
1998-12-29 20:32:08 +01:00
|
|
|
/*
|
Fix assorted race conditions in the new timeout infrastructure.
Prevent handle_sig_alarm from losing control partway through due to a query
cancel (either an asynchronous SIGINT, or a cancel triggered by one of the
timeout handler functions). That would at least result in failure to
schedule any required future interrupt, and might result in actual
corruption of timeout.c's data structures, if the interrupt happened while
we were updating those.
We could still lose control if an asynchronous SIGINT arrives just as the
function is entered. This wouldn't break any data structures, but it would
have the same effect as if the SIGALRM interrupt had been silently lost:
we'd not fire any currently-due handlers, nor schedule any new interrupt.
To forestall that scenario, forcibly reschedule any pending timer interrupt
during AbortTransaction and AbortSubTransaction. We can avoid any extra
kernel call in most cases by not doing that until we've allowed
LockErrorCleanup to kill the DEADLOCK_TIMEOUT and LOCK_TIMEOUT events.
Another hazard is that some platforms (at least Linux and *BSD) block a
signal before calling its handler and then unblock it on return. When we
longjmp out of the handler, the unblock doesn't happen, and the signal is
left blocked indefinitely. Again, we can fix that by forcibly unblocking
signals during AbortTransaction and AbortSubTransaction.
These latter two problems do not manifest when the longjmp reaches
postgres.c, because the error recovery code there kills all pending timeout
events anyway, and it uses sigsetjmp(..., 1) so that the appropriate signal
mask is restored. So errors thrown outside any transaction should be OK
already, and cleaning up in AbortTransaction and AbortSubTransaction should
be enough to fix these issues. (We're assuming that any code that catches
a query cancel error and doesn't re-throw it will do at least a
subtransaction abort to clean up; but that was pretty much required already
by other subsystems.)
Lastly, ProcSleep should not clear the LOCK_TIMEOUT indicator flag when
disabling that event: if a lock timeout interrupt happened after the lock
was granted, the ensuing query cancel is still going to happen at the next
CHECK_FOR_INTERRUPTS, and we want to report it as a lock timeout not a user
cancel.
Per reports from Dan Wood.
Back-patch to 9.3 where the new timeout handling infrastructure was
introduced. We may at some point decide to back-patch the signal
unblocking changes further, but I'll desist from that until we hear
actual field complaints about it.
2013-11-29 22:41:00 +01:00
|
|
|
* Disable the timers, if they are still running. As in LockErrorCleanup,
|
|
|
|
* we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
|
|
|
|
* already caused QueryCancelPending to become set, we want the cancel to
|
|
|
|
* be reported as a lock timeout, not a user cancel.
|
1998-12-29 20:32:08 +01:00
|
|
|
*/
|
2016-03-10 20:26:24 +01:00
|
|
|
if (!InHotStandby)
|
2013-03-17 04:22:17 +01:00
|
|
|
{
|
2016-03-10 20:26:24 +01:00
|
|
|
if (LockTimeout > 0)
|
|
|
|
{
|
|
|
|
DisableTimeoutParams timeouts[2];
|
2013-03-17 04:22:17 +01:00
|
|
|
|
2016-03-10 20:26:24 +01:00
|
|
|
timeouts[0].id = DEADLOCK_TIMEOUT;
|
|
|
|
timeouts[0].keep_indicator = false;
|
|
|
|
timeouts[1].id = LOCK_TIMEOUT;
|
|
|
|
timeouts[1].keep_indicator = true;
|
|
|
|
disable_timeouts(timeouts, 2);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
disable_timeout(DEADLOCK_TIMEOUT, false);
|
2013-03-17 04:22:17 +01:00
|
|
|
}
|
1998-12-29 20:32:08 +01:00
|
|
|
|
2021-01-13 14:59:17 +01:00
|
|
|
/*
|
|
|
|
* Emit the log message if recovery conflict on lock was resolved but the
|
|
|
|
* startup process waited longer than deadlock_timeout for it.
|
|
|
|
*/
|
|
|
|
if (InHotStandby && logged_recovery_conflict)
|
|
|
|
LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
|
|
|
|
standbyWaitStart, GetCurrentTimestamp(),
|
|
|
|
NULL, false);
|
|
|
|
|
2001-01-14 06:08:17 +01:00
|
|
|
/*
|
2005-12-11 22:02:18 +01:00
|
|
|
* Re-acquire the lock table's partition lock. We have to do this to hold
|
|
|
|
* off cancel/die interrupts before we can mess with lockAwaited (else we
|
|
|
|
* might have a missed or duplicated locallock update).
|
2004-07-17 05:32:14 +02:00
|
|
|
*/
|
2005-12-11 22:02:18 +01:00
|
|
|
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
|
2004-07-17 05:32:14 +02:00
|
|
|
|
|
|
|
/*
|
2012-04-18 17:17:30 +02:00
|
|
|
* We no longer want LockErrorCleanup to do anything.
|
2001-01-14 06:08:17 +01:00
|
|
|
*/
|
2005-12-11 22:02:18 +01:00
|
|
|
lockAwaited = NULL;
|
2001-01-14 06:08:17 +01:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2004-08-27 19:07:42 +02:00
|
|
|
* If we got the lock, be sure to remember it in the locallock table.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2020-06-17 09:14:37 +02:00
|
|
|
if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
|
2004-08-27 19:07:42 +02:00
|
|
|
GrantAwaitedLock();
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-22 23:30:06 +01:00
|
|
|
/*
|
|
|
|
* We don't have to do anything else, because the awaker did all the
|
|
|
|
* necessary update of the lock table and MyProc.
|
|
|
|
*/
|
2004-07-17 05:32:14 +02:00
|
|
|
return MyProc->waitStatus;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2019-06-21 00:57:07 +02:00
|
|
|
* ProcWakeup -- wake up a process by setting its latch.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2000-12-22 01:51:54 +01:00
|
|
|
* Also remove the process from the wait queue and set its links invalid.
|
1996-07-09 08:22:35 +02:00
|
|
|
* RETURN: the next process in the wait queue.
|
2001-09-04 04:26:57 +02:00
|
|
|
*
|
2005-12-11 22:02:18 +01:00
|
|
|
* The appropriate lock partition lock must be held by caller.
|
|
|
|
*
|
2001-09-04 04:26:57 +02:00
|
|
|
* XXX: presently, this code is only used for the "success" case, and only
|
|
|
|
* works correctly for that case. To clean up in failure case, would need
|
|
|
|
* to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
|
2020-06-17 09:14:37 +02:00
|
|
|
* Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *
|
2020-06-17 09:14:37 +02:00
|
|
|
ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *retProc;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-22 23:30:06 +01:00
|
|
|
/* Proc should be sleeping ... */
|
2008-11-02 22:24:52 +01:00
|
|
|
if (proc->links.prev == NULL ||
|
|
|
|
proc->links.next == NULL)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
2020-06-17 09:14:37 +02:00
|
|
|
Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-22 23:30:06 +01:00
|
|
|
/* Save next process before we zap the list link */
|
2008-11-02 22:24:52 +01:00
|
|
|
retProc = (PGPROC *) proc->links.next;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-22 23:30:06 +01:00
|
|
|
/* Remove process from wait queue */
|
1996-07-09 08:22:35 +02:00
|
|
|
SHMQueueDelete(&(proc->links));
|
2000-12-22 01:51:54 +01:00
|
|
|
(proc->waitLock->waitProcs.size)--;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-22 23:30:06 +01:00
|
|
|
/* Clean up process' state and pass it the ok/fail signal */
|
|
|
|
proc->waitLock = NULL;
|
2004-08-27 19:07:42 +02:00
|
|
|
proc->waitProcLock = NULL;
|
2004-07-17 05:32:14 +02:00
|
|
|
proc->waitStatus = waitStatus;
|
Display the time when the process started waiting for the lock, in pg_locks, take 2
This commit adds new column "waitstart" into pg_locks view. This column
reports the time when the server process started waiting for the lock
if the lock is not held. This information is useful, for example, when
examining the amount of time to wait on a lock by subtracting
"waitstart" in pg_locks from the current time, and identify the lock
that the processes are waiting for very long.
This feature uses the current time obtained for the deadlock timeout
timer as "waitstart" (i.e., the time when this process started waiting
for the lock). Since getting the current time newly can cause overhead,
we reuse the already-obtained time to avoid that overhead.
Note that "waitstart" is updated without holding the lock table's
partition lock, to avoid the overhead by additional lock acquisition.
This can cause "waitstart" in pg_locks to become NULL for a very short
period of time after the wait started even though "granted" is false.
This is OK in practice because we can assume that users are likely to
look at "waitstart" when waiting for the lock for a long time.
The first attempt of this patch (commit 3b733fcd04) caused the buildfarm
member "rorqual" (built with --disable-atomics --disable-spinlocks) to report
the failure of the regression test. It was reverted by commit 890d2182a2.
The cause of this failure was that the atomic variable for "waitstart"
in the dummy process entry created at the end of prepare transaction was
not initialized. This second attempt fixes that issue.
Bump catalog version.
Author: Atsushi Torikoshi
Reviewed-by: Ian Lawrence Barwick, Robert Haas, Justin Pryzby, Fujii Masao
Discussion: https://postgr.es/m/a96013dc51cdc56b2a2b84fa8a16a993@oss.nttdata.com
2021-02-15 07:13:37 +01:00
|
|
|
pg_atomic_write_u64(&MyProc->waitStart, 0);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-22 23:30:06 +01:00
|
|
|
/* And awaken it */
|
2015-02-03 23:24:38 +01:00
|
|
|
SetLatch(&proc->procLatch);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
return retProc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ProcLockWakeup -- routine for waking up processes when a lock is
|
2001-01-25 04:31:16 +01:00
|
|
|
* released (or a prior waiter is aborted). Scan all waiters
|
|
|
|
* for lock, waken any that are no longer blocked.
|
2005-12-11 22:02:18 +01:00
|
|
|
*
|
|
|
|
* The appropriate lock partition lock must be held by caller.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2001-01-25 04:31:16 +01:00
|
|
|
void
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2001-01-25 04:31:16 +01:00
|
|
|
PROC_QUEUE *waitQueue = &(lock->waitProcs);
|
|
|
|
int queue_size = waitQueue->size;
|
2002-06-11 15:40:53 +02:00
|
|
|
PGPROC *proc;
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
LOCKMASK aheadRequests = 0;
|
1998-08-25 23:20:32 +02:00
|
|
|
|
2000-12-22 01:51:54 +01:00
|
|
|
Assert(queue_size >= 0);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-25 04:31:16 +01:00
|
|
|
if (queue_size == 0)
|
|
|
|
return;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2008-11-02 22:24:52 +01:00
|
|
|
proc = (PGPROC *) waitQueue->links.next;
|
1998-09-01 06:40:42 +02:00
|
|
|
|
2000-12-22 01:51:54 +01:00
|
|
|
while (queue_size-- > 0)
|
|
|
|
{
|
2001-01-25 04:31:16 +01:00
|
|
|
LOCKMODE lockmode = proc->waitLockMode;
|
1998-08-25 23:20:32 +02:00
|
|
|
|
|
|
|
/*
|
2001-01-25 04:31:16 +01:00
|
|
|
* Waken if (a) doesn't conflict with requests of earlier waiters, and
|
|
|
|
* (b) doesn't conflict with already-held locks.
|
1998-08-25 23:20:32 +02:00
|
|
|
*/
|
2002-07-19 01:06:20 +02:00
|
|
|
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
|
2019-12-29 09:09:20 +01:00
|
|
|
!LockCheckConflicts(lockMethodTable, lockmode, lock,
|
|
|
|
proc->waitProcLock))
|
1998-08-25 23:20:32 +02:00
|
|
|
{
|
2001-01-25 04:31:16 +01:00
|
|
|
/* OK to waken */
|
2004-08-27 19:07:42 +02:00
|
|
|
GrantLock(lock, proc->waitProcLock, lockmode);
|
2020-06-17 09:14:37 +02:00
|
|
|
proc = ProcWakeup(proc, PROC_WAIT_STATUS_OK);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2001-01-25 04:31:16 +01:00
|
|
|
/*
|
|
|
|
* ProcWakeup removes proc from the lock's waiting process queue
|
|
|
|
* and returns the next proc in chain; don't use proc's next-link,
|
|
|
|
* because it's been cleared.
|
|
|
|
*/
|
1998-08-25 23:20:32 +02:00
|
|
|
}
|
2001-01-25 04:31:16 +01:00
|
|
|
else
|
2000-05-31 02:28:42 +02:00
|
|
|
{
|
2001-01-26 19:23:12 +01:00
|
|
|
/*
|
|
|
|
* Cannot wake this guy. Remember his request for later checks.
|
|
|
|
*/
|
Try to reduce confusion about what is a lock method identifier, a lock
method control structure, or a table of control structures.
. Use type LOCKMASK where an int is not a counter.
. Get rid of INVALID_TABLEID, use INVALID_LOCKMETHOD instead.
. Use INVALID_LOCKMETHOD instead of (LOCKMETHOD) NULL, because
LOCKMETHOD is not a pointer.
. Define and use macro LockMethodIsValid.
. Rename LOCKMETHOD to LOCKMETHODID.
. Remove global variable LongTermTableId in lmgr.c, because it is
never used.
. Make LockTableId static in lmgr.c, because it is used nowhere else.
Why not remove it and use DEFAULT_LOCKMETHOD?
. Rename the lock method control structure from LOCKMETHODTABLE to
LockMethodData. Introduce a pointer type named LockMethod.
. Remove elog(FATAL) after InitLockTable() call in
CreateSharedMemoryAndSemaphores(), because if something goes wrong,
there is elog(FATAL) in LockMethodTableInit(), and if this doesn't
help, an elog(ERROR) in InitLockTable() is promoted to FATAL.
. Make InitLockTable() void, because its only caller does not use its
return value any more.
. Rename variables in lock.c to avoid statements like
LockMethodTable[NumLockMethods] = lockMethodTable;
lockMethodTable = LockMethodTable[lockmethod];
. Change LOCKMETHODID type to uint16 to fit into struct LOCKTAG.
. Remove static variables BITS_OFF and BITS_ON from lock.c, because
I agree to this doubt:
* XXX is a fetch from a static array really faster than a shift?
. Define and use macros LOCKBIT_ON/OFF.
Manfred Koizar
2003-12-01 22:59:25 +01:00
|
|
|
aheadRequests |= LOCKBIT_ON(lockmode);
|
2008-11-02 22:24:52 +01:00
|
|
|
proc = (PGPROC *) proc->links.next;
|
2000-05-31 02:28:42 +02:00
|
|
|
}
|
1998-08-25 23:20:32 +02:00
|
|
|
}
|
2001-01-25 04:31:16 +01:00
|
|
|
|
|
|
|
Assert(waitQueue->size >= 0);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2005-12-11 22:02:18 +01:00
|
|
|
/*
 * CheckDeadLock
 *
 * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
 * lock to be released by some other process.  Check if there's a deadlock; if
 * not, just return.  (But signal ProcSleep to log a message, if
 * log_lock_waits is true.)  If we have a real deadlock, remove ourselves from
 * the lock's wait queue and signal an error to ProcSleep.
 */
static void
CheckDeadLock(void)
{
	int			i;

	/*
	 * Acquire exclusive lock on the entire shared lock data structures. Must
	 * grab LWLocks in partition-number order to avoid LWLock deadlock.
	 *
	 * Note that the deadlock check interrupt had better not be enabled
	 * anywhere that this process itself holds lock partition locks, else this
	 * will wait forever.  Also note that LWLockAcquire creates a critical
	 * section, so that this routine cannot be interrupted by cancel/die
	 * interrupts.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);

	/*
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have, we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to us so
	 * we know that we don't have to wait anymore.
	 *
	 * We check by looking to see if we've been unlinked from the wait queue.
	 * This is safe because we hold the lock partition lock.
	 */
	if (MyProc->links.prev == NULL ||
		MyProc->links.next == NULL)
		goto check_done;		/* already granted; just release the LWLocks */

#ifdef LOCK_DEBUG
	if (Debug_deadlocks)
		DumpAllLocks();
#endif

	/* Run the deadlock check, and set deadlock_state for use by ProcSleep */
	deadlock_state = DeadLockCheck(MyProc);

	if (deadlock_state == DS_HARD_DEADLOCK)
	{
		/*
		 * Oops.  We have a deadlock.
		 *
		 * Get this process out of wait state. (Note: we could do this more
		 * efficiently by relying on lockAwaited, but use this coding to
		 * preserve the flexibility to kill some other transaction than the
		 * one detecting the deadlock.)
		 *
		 * RemoveFromWaitQueue sets MyProc->waitStatus to
		 * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
		 * return from the signal handler.
		 */
		Assert(MyProc->waitLock != NULL);
		RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));

		/*
		 * We're done here.  Transaction abort caused by the error that
		 * ProcSleep will raise will cause any other locks we hold to be
		 * released, thus allowing other processes to wake up; we don't need
		 * to do that here.  NOTE: an exception is that releasing locks we
		 * hold doesn't consider the possibility of waiters that were blocked
		 * behind us on the lock we just failed to get, and might now be
		 * wakable because we're not in front of them anymore.  However,
		 * RemoveFromWaitQueue took care of waking up any such processes.
		 */
	}

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs.  (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
check_done:
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));
}
|
|
|
|
|
2015-02-03 23:24:38 +01:00
|
|
|
/*
|
|
|
|
* CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
|
|
|
|
*
|
|
|
|
* NB: Runs inside a signal handler, be careful.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
CheckDeadLockAlert(void)
|
|
|
|
{
|
|
|
|
int save_errno = errno;
|
|
|
|
|
|
|
|
got_deadlock_timeout = true;
|
2015-05-24 03:35:49 +02:00
|
|
|
|
2015-02-03 23:24:38 +01:00
|
|
|
/*
|
|
|
|
* Have to set the latch again, even if handle_sig_alarm already did. Back
|
|
|
|
* then got_deadlock_timeout wasn't yet set... It's unlikely that this
|
|
|
|
* ever would be a problem, but setting a set latch again is cheap.
|
Detect the deadlocks between backends and the startup process.
The deadlocks that the recovery conflict on lock is involved in can
happen between hot-standby backends and the startup process.
If a backend takes an access exclusive lock on the table and which
finally triggers the deadlock, that deadlock can be detected
as expected. On the other hand, previously, if the startup process
took an access exclusive lock and which finally triggered the deadlock,
that deadlock could not be detected and could remain even after
deadlock_timeout passed. This is a bug.
The cause of this bug was that the code for handling the recovery
conflict on lock didn't take care of deadlock case at all. It assumed
that deadlocks involving the startup process and backends were able
to be detected by the deadlock detector invoked within backends.
But this assumption was incorrect. The startup process also should
have invoked the deadlock detector if necessary.
To fix this bug, this commit makes the startup process invoke
the deadlock detector if deadlock_timeout is reached while handling
the recovery conflict on lock. Specifically, in that case, the startup
process requests all the backends holding the conflicting locks to
check themselves for deadlocks.
Back-patch to v9.6. v9.5 has also this bug, but per discussion we decided
not to back-patch the fix to v9.5. Because v9.5 doesn't have some
infrastructure codes (e.g., 37c54863cf) that this bug fix patch depends on.
We can apply those codes for the back-patch, but since the next minor
version release is the final one for v9.5, it's risky to do that. If we
unexpectedly introduce new bug to v9.5 by the back-patch, there is no
chance to fix that. We determined that the back-patch to v9.5 would give
more risk than gain.
Author: Fujii Masao
Reviewed-by: Bertrand Drouvot, Masahiko Sawada, Kyotaro Horiguchi
Discussion: https://postgr.es/m/4041d6b6-cf24-a120-36fa-1294220f8243@oss.nttdata.com
2021-01-06 04:39:18 +01:00
|
|
|
*
|
|
|
|
* Note that, when this function runs inside procsignal_sigusr1_handler(),
|
|
|
|
* the handler function sets the latch again after the latch is set here.
|
2015-02-03 23:24:38 +01:00
|
|
|
*/
|
|
|
|
SetLatch(MyLatch);
|
|
|
|
errno = save_errno;
|
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2001-07-06 23:04:26 +02:00
|
|
|
/*
|
|
|
|
* ProcWaitForSignal - wait for a signal from another backend.
|
|
|
|
*
|
2015-02-03 23:24:38 +01:00
|
|
|
* As this uses the generic process latch the caller has to be robust against
|
|
|
|
* unrelated wakeups: Always check that the desired state has occurred, and
|
|
|
|
* wait again if not.
|
2001-07-06 23:04:26 +02:00
|
|
|
*/
|
|
|
|
void
|
2016-10-04 16:50:13 +02:00
|
|
|
ProcWaitForSignal(uint32 wait_event_info)
|
2001-07-06 23:04:26 +02:00
|
|
|
{
|
Add WL_EXIT_ON_PM_DEATH pseudo-event.
Users of the WaitEventSet and WaitLatch() APIs can now choose between
asking for WL_POSTMASTER_DEATH and then handling it explicitly, or asking
for WL_EXIT_ON_PM_DEATH to trigger immediate exit on postmaster death.
This reduces code duplication, since almost all callers want the latter.
Repair all code that was previously ignoring postmaster death completely,
or requesting the event but ignoring it, or requesting the event but then
doing an unconditional PostmasterIsAlive() call every time through its
event loop (which is an expensive syscall on platforms for which we don't
have USE_POSTMASTER_DEATH_SIGNAL support).
Assert that callers of WaitLatchXXX() under the postmaster remember to
ask for either WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH, to prevent
future bugs.
The only process that doesn't handle postmaster death is syslogger. It
waits until all backends holding the write end of the syslog pipe
(including the postmaster) have closed it by exiting, to be sure to
capture any parting messages. By using the WaitEventSet API directly
it avoids the new assertion, and as a by-product it may be slightly
more efficient on platforms that have epoll().
Author: Thomas Munro
Reviewed-by: Kyotaro Horiguchi, Heikki Linnakangas, Tom Lane
Discussion: https://postgr.es/m/CAEepm%3D1TCviRykkUb69ppWLr_V697rzd1j3eZsRMmbXvETfqbQ%40mail.gmail.com,
https://postgr.es/m/CAEepm=2LqHzizbe7muD7-2yHUbTOoF7Q+qkSD5Q41kuhttRTwA@mail.gmail.com
2018-11-23 08:16:41 +01:00
|
|
|
(void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
|
|
|
|
wait_event_info);
|
2015-02-03 23:24:38 +01:00
|
|
|
ResetLatch(MyLatch);
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
2001-07-06 23:04:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2021-12-16 00:40:15 +01:00
|
|
|
* ProcSendSignal - set the latch of a backend identified by pgprocno
|
2001-07-06 23:04:26 +02:00
|
|
|
*/
|
|
|
|
void
|
2021-12-16 00:40:15 +01:00
|
|
|
ProcSendSignal(int pgprocno)
|
2001-07-06 23:04:26 +02:00
|
|
|
{
|
2021-12-16 00:40:15 +01:00
|
|
|
if (pgprocno < 0 || pgprocno >= ProcGlobal->allProcCount)
|
|
|
|
elog(ERROR, "pgprocno out of range");
|
Allow read only connections during recovery, known as Hot Standby.
Enabled by recovery_connections = on (default) and forcing archive recovery using a recovery.conf. Recovery processing now emulates the original transactions as they are replayed, providing full locking and MVCC behaviour for read only queries. Recovery must enter consistent state before connections are allowed, so there is a delay, typically short, before connections succeed. Replay of recovering transactions can conflict and in some cases deadlock with queries during recovery; these result in query cancellation after max_standby_delay seconds have expired. Infrastructure changes have minor effects on normal running, though introduce four new types of WAL record.
New test mode "make standbycheck" allows regression tests of static command behaviour on a standby server while in recovery. Typical and extreme dynamic behaviours have been checked via code inspection and manual testing. Few port specific behaviours have been utilised, though primary testing has been on Linux only so far.
This commit is the basic patch. Additional changes will follow in this release to enhance some aspects of behaviour, notably improved handling of conflicts, deadlock detection and query cancellation. Changes to VACUUM FULL are also required.
Simon Riggs, with significant and lengthy review by Heikki Linnakangas, including streamlined redesign of snapshot creation and two-phase commit.
Important contributions from Florian Pflug, Mark Kirkwood, Merlin Moncure, Greg Stark, Gianni Ciolli, Gabriele Bartolini, Hannu Krosing, Robert Haas, Tatsuo Ishii, Hiroyuki Yamada plus support and feedback from many other community members.
2009-12-19 02:32:45 +01:00
|
|
|
|
2021-12-16 00:40:15 +01:00
|
|
|
SetLatch(&ProcGlobal->allProcs[pgprocno].procLatch);
|
2001-07-06 23:04:26 +02:00
|
|
|
}
|
2016-02-07 16:16:13 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* BecomeLockGroupLeader - designate process as lock group leader
|
|
|
|
*
|
|
|
|
* Once this function has returned, other processes can join the lock group
|
|
|
|
* by calling BecomeLockGroupMember.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
BecomeLockGroupLeader(void)
|
|
|
|
{
|
|
|
|
LWLock *leader_lwlock;
|
|
|
|
|
|
|
|
/* If we already did it, we don't need to do it again. */
|
|
|
|
if (MyProc->lockGroupLeader == MyProc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* We had better not be a follower. */
|
|
|
|
Assert(MyProc->lockGroupLeader == NULL);
|
|
|
|
|
|
|
|
/* Create single-member group, containing only ourselves. */
|
|
|
|
leader_lwlock = LockHashPartitionLockByProc(MyProc);
|
|
|
|
LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
|
|
|
|
MyProc->lockGroupLeader = MyProc;
|
|
|
|
dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
|
|
|
|
LWLockRelease(leader_lwlock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* BecomeLockGroupMember - designate process as lock group member
|
|
|
|
*
|
|
|
|
* This is pretty straightforward except for the possibility that the leader
|
|
|
|
* whose group we're trying to join might exit before we manage to do so;
|
|
|
|
* and the PGPROC might get recycled for an unrelated process. To avoid
|
|
|
|
* that, we require the caller to pass the PID of the intended PGPROC as
|
|
|
|
* an interlock. Returns true if we successfully join the intended lock
|
|
|
|
* group, and false if not.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
BecomeLockGroupMember(PGPROC *leader, int pid)
|
|
|
|
{
|
|
|
|
LWLock *leader_lwlock;
|
|
|
|
bool ok = false;
|
|
|
|
|
|
|
|
/* Group leader can't become member of group */
|
|
|
|
Assert(MyProc != leader);
|
|
|
|
|
2016-02-22 17:20:35 +01:00
|
|
|
/* Can't already be a member of a group */
|
|
|
|
Assert(MyProc->lockGroupLeader == NULL);
|
|
|
|
|
2016-02-07 16:16:13 +01:00
|
|
|
/* PID must be valid. */
|
|
|
|
Assert(pid != 0);
|
|
|
|
|
2016-02-21 11:12:02 +01:00
|
|
|
/*
|
|
|
|
* Get lock protecting the group fields. Note LockHashPartitionLockByProc
|
|
|
|
* accesses leader->pgprocno in a PGPROC that might be free. This is safe
|
|
|
|
* because all PGPROCs' pgprocno fields are set during shared memory
|
|
|
|
* initialization and never change thereafter; so we will acquire the
|
|
|
|
* correct lock even if the leader PGPROC is in process of being recycled.
|
|
|
|
*/
|
2016-02-21 12:36:41 +01:00
|
|
|
leader_lwlock = LockHashPartitionLockByProc(leader);
|
2016-02-07 16:16:13 +01:00
|
|
|
LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
|
2016-02-21 11:12:02 +01:00
|
|
|
|
2016-02-22 17:20:35 +01:00
|
|
|
/* Is this the leader we're looking for? */
|
|
|
|
if (leader->pid == pid && leader->lockGroupLeader == leader)
|
2016-02-07 16:16:13 +01:00
|
|
|
{
|
2016-02-22 17:20:35 +01:00
|
|
|
/* OK, join the group */
|
2016-02-07 16:16:13 +01:00
|
|
|
ok = true;
|
|
|
|
MyProc->lockGroupLeader = leader;
|
|
|
|
dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
|
|
|
|
}
|
|
|
|
LWLockRelease(leader_lwlock);
|
|
|
|
|
|
|
|
return ok;
|
|
|
|
}
|