/*-------------------------------------------------------------------------
 *
 * lwlock.h
 *	  Lightweight lock manager
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/lwlock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef LWLOCK_H
#define LWLOCK_H

#ifdef FRONTEND
#error "lwlock.h may not be included from frontend code"
#endif

#include "port/atomics.h"
#include "storage/proclist_types.h"

struct PGPROC;

/* what state of the wait process a backend is in */
typedef enum LWLockWaitState
{
	LW_WS_NOT_WAITING,			/* not currently waiting / woken up */
	LW_WS_WAITING,				/* currently waiting */
	LW_WS_PENDING_WAKEUP,		/* removed from waitlist, but not yet
								 * signalled */
} LWLockWaitState;

/*
 * Code outside of lwlock.c should not manipulate the contents of this
 * structure directly, but we have to declare it here to allow LWLocks to be
 * incorporated into other data structures.
 */
typedef struct LWLock
{
	uint16		tranche;		/* tranche ID */
	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
	proclist_head waiters;		/* list of waiting PGPROCs */
#ifdef LOCK_DEBUG
	pg_atomic_uint32 nwaiters;	/* number of waiters */
	struct PGPROC *owner;		/* last exclusive owner of the lock */
#endif
} LWLock;

/*
 * In most cases, it's desirable to force each tranche of LWLocks to be aligned
 * on a cache line boundary and make the array stride a power of 2.  This saves
 * a few cycles in indexing, but more importantly ensures that individual
 * LWLocks don't cross cache line boundaries.  This reduces cache contention
 * problems, especially on AMD Opterons.  In some cases, it's useful to add
 * even more padding so that each LWLock takes up an entire cache line; this is
 * useful, for example, in the main LWLock array, where the overall number of
 * locks is small but some are heavily contended.
 */
#define LWLOCK_PADDED_SIZE	PG_CACHE_LINE_SIZE

StaticAssertDecl(sizeof(LWLock) <= LWLOCK_PADDED_SIZE,
				 "Miscalculated LWLock padding");

/* LWLock, padded to a full cache line size */
typedef union LWLockPadded
{
	LWLock		lock;
	char		pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

extern PGDLLIMPORT LWLockPadded *MainLWLockArray;

/* struct for storing named tranche information */
typedef struct NamedLWLockTranche
{
	int			trancheId;
	char	   *trancheName;
} NamedLWLockTranche;

extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
extern PGDLLIMPORT int NamedLWLockTrancheRequests;

/* Names for fixed lwlocks */
#include "storage/lwlocknames.h"

/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */

/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET	NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET		\
	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
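
/*
 * For illustration: lwlock.c lays out MainLWLockArray as the individual
 * LWLocks followed by these partitioned chunks, so a partition lock is
 * reached by adding the chunk offset to a hash-based partition number.
 * A sketch (hashcode is a placeholder; compare BufMappingPartitionLock()
 * in buf_internals.h):
 *
 *		LWLock *lock = &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET +
 *										hashcode % NUM_BUFFER_PARTITIONS].lock;
 */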

typedef enum LWLockMode
{
	LW_EXCLUSIVE,
	LW_SHARED,
	LW_WAIT_UNTIL_FREE			/* A special mode used in PGPROC->lwWaitMode,
								 * when waiting for lock to become free. Not
								 * to be used as LWLockAcquire argument */
} LWLockMode;

#ifdef LOCK_DEBUG
extern PGDLLIMPORT bool Trace_lwlocks;
#endif

extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
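
/*
 * Typical calling pattern, for illustration only (the protected structure
 * is hypothetical):
 *
 *		LWLockAcquire(lock, LW_EXCLUSIVE);	// sleeps until the lock is granted
 *		shared->counter++;					// touch the protected state
 *		LWLockRelease(lock);
 *
 * LWLockConditionalAcquire() returns false immediately instead of sleeping;
 * LWLockAcquireOrWait() either acquires the lock and returns true, or waits
 * until the lock is released and returns false without acquiring it (used
 * for group WAL flushes in XLogFlush()).
 */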

extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val);
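
/*
 * These two cooperate to let a waiter watch a lock-protected uint64 without
 * acquiring the lock: LWLockWaitForVar() returns true once the lock is free,
 * or returns false with *newval set as soon as the holder changes the
 * variable via LWLockUpdateVar().  Sketch of the waiter side (field names
 * are hypothetical; the WAL insert locks use this machinery):
 *
 *		uint64		seen;
 *
 *		if (!LWLockWaitForVar(lock, &shared->insertingAt, knownPos, &seen))
 *			knownPos = seen;	// holder advanced the variable; re-check
 */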

extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);

extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);

/*
 * Extensions (or core code) can obtain LWLocks by calling
 * RequestNamedLWLockTranche() during postmaster startup.  Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array of the
 * requested number of LWLocks.
 */
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
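
/*
 * Sketch of the named-tranche pattern from an extension (the tranche name
 * "my_ext" and the lock count are hypothetical; the request must be made
 * while shared memory can still be sized, i.e. from a library loaded via
 * shared_preload_libraries):
 *
 *		// in _PG_init()
 *		RequestNamedLWLockTranche("my_ext", 4);
 *
 *		// later, once shared memory exists
 *		LWLockPadded *locks = GetNamedLWLockTranche("my_ext");
 *		LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 *		...
 *		LWLockRelease(&locks[0].lock);
 */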

/*
 * There is another, more flexible method of obtaining lwlocks.  First, call
 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
 * a shared counter.  Next, each individual process using the tranche should
 * call LWLockRegisterTranche() to associate that tranche ID with a name.
 * Finally, LWLockInitialize should be called just once per lwlock, passing
 * the tranche ID as an argument.
 *
 * It may seem strange that each process using the tranche must register it
 * separately, but dynamic shared memory segments aren't guaranteed to be
 * mapped at the same address in all coordinating backends, so storing the
 * registration in the main shared memory segment wouldn't work for that case.
 */
extern int	LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
extern void LWLockInitialize(LWLock *lock, int tranche_id);
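
/*
 * Sketch of that three-step recipe (names are hypothetical; the LWLock
 * would typically live in a dynamic shared memory segment):
 *
 *		// once, in the process that sets up the shared state
 *		int			tranche_id = LWLockNewTrancheId();
 *		LWLockInitialize(&shared->lock, tranche_id);
 *
 *		// in every process that touches the lock, including the creator
 *		LWLockRegisterTranche(tranche_id, "my_dsm_tranche");
 */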

/*
 * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
 * we reserve additional tranche IDs for builtin tranches not included in
 * the set of individual LWLocks.  A call to LWLockNewTrancheId will never
 * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
 */
typedef enum BuiltinTrancheIds
{
	LWTRANCHE_XACT_BUFFER = NUM_INDIVIDUAL_LWLOCKS,
	LWTRANCHE_COMMITTS_BUFFER,
	LWTRANCHE_SUBTRANS_BUFFER,
	LWTRANCHE_MULTIXACTOFFSET_BUFFER,
	LWTRANCHE_MULTIXACTMEMBER_BUFFER,
	LWTRANCHE_NOTIFY_BUFFER,
	LWTRANCHE_SERIAL_BUFFER,
	LWTRANCHE_WAL_INSERT,
	LWTRANCHE_BUFFER_CONTENT,
	LWTRANCHE_REPLICATION_ORIGIN_STATE,
	LWTRANCHE_REPLICATION_SLOT_IO,
	LWTRANCHE_LOCK_FASTPATH,
	LWTRANCHE_BUFFER_MAPPING,
	LWTRANCHE_LOCK_MANAGER,
	LWTRANCHE_PREDICATE_LOCK_MANAGER,
	LWTRANCHE_PARALLEL_HASH_JOIN,
	LWTRANCHE_PARALLEL_QUERY_DSA,
	LWTRANCHE_PER_SESSION_DSA,
	LWTRANCHE_PER_SESSION_RECORD_TYPE,
	LWTRANCHE_PER_SESSION_RECORD_TYPMOD,
	LWTRANCHE_SHARED_TUPLESTORE,
	LWTRANCHE_SHARED_TIDBITMAP,
	LWTRANCHE_PARALLEL_APPEND,
	LWTRANCHE_PER_XACT_PREDICATE_LIST,
	LWTRANCHE_PGSTATS_DSA,
	LWTRANCHE_PGSTATS_HASH,
	LWTRANCHE_PGSTATS_DATA,
	LWTRANCHE_LAUNCHER_DSA,
	LWTRANCHE_LAUNCHER_HASH,
	LWTRANCHE_FIRST_USER_DEFINED
} BuiltinTrancheIds;

/*
 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
 * to LWLocks.  New code should instead use LWLock *.  However, for the
 * convenience of third-party code, we include the following typedef.
 */
typedef LWLock *LWLockId;

#endif							/* LWLOCK_H */