2001-09-29 06:02:27 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* lwlock.h
|
|
|
|
* Lightweight lock manager
|
|
|
|
*
|
|
|
|
*
|
2019-01-02 18:44:25 +01:00
|
|
|
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
|
2001-09-29 06:02:27 +02:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/include/storage/lwlock.h
|
2001-09-29 06:02:27 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
#ifndef LWLOCK_H
|
|
|
|
#define LWLOCK_H
|
|
|
|
|
2015-08-07 15:10:56 +02:00
|
|
|
#ifdef FRONTEND
|
|
|
|
#error "lwlock.h may not be included from frontend code"
|
|
|
|
#endif
|
|
|
|
|
2016-08-16 00:09:55 +02:00
|
|
|
#include "storage/proclist_types.h"
|
2014-01-27 17:07:44 +01:00
|
|
|
#include "storage/s_lock.h"
|
2014-12-25 17:24:30 +01:00
|
|
|
#include "port/atomics.h"
|
2014-01-27 17:07:44 +01:00
|
|
|
|
|
|
|
struct PGPROC;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Code outside of lwlock.c should not manipulate the contents of this
|
|
|
|
* structure directly, but we have to declare it here to allow LWLocks to be
|
|
|
|
* incorporated into other data structures.
|
|
|
|
*/
|
|
|
|
typedef struct LWLock
|
|
|
|
{
|
2014-12-25 17:24:30 +01:00
|
|
|
uint16 tranche; /* tranche ID */
|
2015-09-05 10:35:49 +02:00
|
|
|
pg_atomic_uint32 state; /* state of exclusive/nonexclusive lockers */
|
2016-08-16 00:09:55 +02:00
|
|
|
proclist_head waiters; /* list of waiting PGPROCs */
|
2014-12-25 17:24:30 +01:00
|
|
|
#ifdef LOCK_DEBUG
|
2016-04-11 05:12:32 +02:00
|
|
|
pg_atomic_uint32 nwaiters; /* number of waiters */
|
2015-09-05 10:35:49 +02:00
|
|
|
struct PGPROC *owner; /* last exclusive owner of the lock */
|
2014-12-25 17:24:30 +01:00
|
|
|
#endif
|
2014-01-27 17:07:44 +01:00
|
|
|
} LWLock;
|
|
|
|
|
|
|
|
/*
|
2015-12-15 19:32:54 +01:00
|
|
|
* In most cases, it's desirable to force each tranche of LWLocks to be aligned
|
|
|
|
* on a cache line boundary and make the array stride a power of 2. This saves
|
|
|
|
* a few cycles in indexing, but more importantly ensures that individual
|
|
|
|
* LWLocks don't cross cache line boundaries. This reduces cache contention
|
|
|
|
* problems, especially on AMD Opterons. In some cases, it's useful to add
|
|
|
|
* even more padding so that each LWLock takes up an entire cache line; this is
|
|
|
|
* useful, for example, in the main LWLock array, where the overall number of
|
|
|
|
* locks is small but some are heavily contended.
|
|
|
|
*
|
|
|
|
* When allocating a tranche that contains data other than LWLocks, it is
|
|
|
|
* probably best to include a bare LWLock and then pad the resulting structure
|
|
|
|
* as necessary for performance. For an array that contains only LWLocks,
|
|
|
|
* LWLockMinimallyPadded can be used for cases where we just want to ensure
|
|
|
|
* that we don't cross cache line boundaries within a single lock, while
|
|
|
|
* LWLockPadded can be used for cases where we want each lock to be an entire
|
|
|
|
* cache line.
|
2014-01-27 17:07:44 +01:00
|
|
|
*
|
2017-01-04 18:03:40 +01:00
|
|
|
* An LWLockMinimallyPadded might contain more than the absolute minimum amount
|
|
|
|
* of padding required to keep a lock from crossing a cache line boundary,
|
|
|
|
* because an unpadded LWLock will normally fit into 16 bytes. We ignore that
|
|
|
|
* possibility when determining the minimal amount of padding. Older releases
|
|
|
|
* had larger LWLocks, so 32 really was the minimum, and packing them in
|
|
|
|
* tighter might hurt performance.
|
2015-12-15 19:32:54 +01:00
|
|
|
*
|
|
|
|
* LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
|
2017-01-04 18:03:40 +01:00
|
|
|
* because pg_atomic_uint32 is more than 4 bytes on some obscure platforms, we
|
|
|
|
* allow for the possibility that it might be 64. Even on those platforms,
|
|
|
|
* we probably won't exceed 32 bytes unless LOCK_DEBUG is defined.
|
2014-01-27 17:07:44 +01:00
|
|
|
*/
|
2015-12-15 19:32:54 +01:00
|
|
|
#define LWLOCK_PADDED_SIZE PG_CACHE_LINE_SIZE
|
|
|
|
#define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
|
2014-01-27 17:07:44 +01:00
|
|
|
|
2015-12-15 19:32:54 +01:00
|
|
|
/* LWLock, padded to a full cache line size */
|
2014-01-27 17:07:44 +01:00
|
|
|
typedef union LWLockPadded
|
|
|
|
{
|
|
|
|
LWLock lock;
|
|
|
|
char pad[LWLOCK_PADDED_SIZE];
|
|
|
|
} LWLockPadded;
|
2015-12-15 19:32:54 +01:00
|
|
|
|
|
|
|
/* LWLock, minimally padded */
|
|
|
|
typedef union LWLockMinimallyPadded
|
|
|
|
{
|
|
|
|
LWLock lock;
|
|
|
|
char pad[LWLOCK_MINIMAL_SIZE];
|
|
|
|
} LWLockMinimallyPadded;
|
|
|
|
|
2014-02-17 02:12:43 +01:00
|
|
|
extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
|
2018-10-16 05:45:30 +02:00
|
|
|
extern const char *const MainLWLockNames[];
|
2014-01-27 17:07:44 +01:00
|
|
|
|
2016-02-04 22:43:04 +01:00
|
|
|
/* struct for storing named tranche information */
|
|
|
|
typedef struct NamedLWLockTranche
|
|
|
|
{
|
|
|
|
int trancheId;
|
2016-12-16 17:29:23 +01:00
|
|
|
char *trancheName;
|
2016-02-04 22:43:04 +01:00
|
|
|
} NamedLWLockTranche;
|
|
|
|
|
|
|
|
extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
|
|
|
|
extern PGDLLIMPORT int NamedLWLockTrancheRequests;
|
|
|
|
|
2015-09-11 19:58:28 +02:00
|
|
|
/* Names for fixed lwlocks */
|
2015-09-11 20:53:41 +02:00
|
|
|
#include "storage/lwlocknames.h"
|
2014-01-27 17:07:44 +01:00
|
|
|
|
2006-07-23 05:07:58 +02:00
|
|
|
/*
|
|
|
|
* It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
|
2014-01-27 17:07:44 +01:00
|
|
|
* here, but we need them to figure out offsets within MainLWLockArray, and
|
|
|
|
* having this file include lock.h or bufmgr.h would be backwards.
|
2006-07-23 05:07:58 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Number of partitions of the shared buffer mapping hashtable */
|
2014-10-02 19:58:50 +02:00
|
|
|
#define NUM_BUFFER_PARTITIONS 128
|
2006-07-23 05:07:58 +02:00
|
|
|
|
|
|
|
/* Number of partitions the shared lock tables are divided into */
|
2006-07-24 01:08:46 +02:00
|
|
|
#define LOG2_NUM_LOCK_PARTITIONS 4
|
|
|
|
#define NUM_LOCK_PARTITIONS (1 << LOG2_NUM_LOCK_PARTITIONS)
|
2006-07-23 05:07:58 +02:00
|
|
|
|
Implement genuine serializable isolation level.
Until now, our Serializable mode has in fact been what's called Snapshot
Isolation, which allows some anomalies that could not occur in any
serialized ordering of the transactions. This patch fixes that using a
method called Serializable Snapshot Isolation, based on research papers by
Michael J. Cahill (see README-SSI for full references). In Serializable
Snapshot Isolation, transactions run like they do in Snapshot Isolation,
but a predicate lock manager observes the reads and writes performed and
aborts transactions if it detects that an anomaly might occur. This method
produces some false positives, ie. it sometimes aborts transactions even
though there is no anomaly.
To track reads we implement predicate locking, see storage/lmgr/predicate.c.
Whenever a tuple is read, a predicate lock is acquired on the tuple. Shared
memory is finite, so when a transaction takes many tuple-level locks on a
page, the locks are promoted to a single page-level lock, and further to a
single relation level lock if necessary. To lock key values with no matching
tuple, a sequential scan always takes a relation-level lock, and an index
scan acquires a page-level lock that covers the search key, whether or not
there are any matching keys at the moment.
A predicate lock doesn't conflict with any regular locks or with another
predicate locks in the normal sense. They're only used by the predicate lock
manager to detect the danger of anomalies. Only serializable transactions
participate in predicate locking, so there should be no extra overhead for
for other transactions.
Predicate locks can't be released at commit, but must be remembered until
all the transactions that overlapped with it have completed. That means that
we need to remember an unbounded amount of predicate locks, so we apply a
lossy but conservative method of tracking locks for committed transactions.
If we run short of shared memory, we overflow to a new "pg_serial" SLRU
pool.
We don't currently allow Serializable transactions in Hot Standby mode.
That would be hard, because even read-only transactions can cause anomalies
that wouldn't otherwise occur.
Serializable isolation mode now means the new fully serializable level.
Repeatable Read gives you the old Snapshot Isolation level that we have
always had.
Kevin Grittner and Dan Ports, reviewed by Jeff Davis, Heikki Linnakangas and
Anssi Kääriäinen
2011-02-07 22:46:51 +01:00
|
|
|
/* Number of partitions the shared predicate lock tables are divided into */
|
|
|
|
#define LOG2_NUM_PREDICATELOCK_PARTITIONS 4
|
|
|
|
#define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
|
|
|
|
|
2014-01-27 17:07:44 +01:00
|
|
|
/* Offsets for various chunks of preallocated lwlocks. */
|
|
|
|
#define BUFFER_MAPPING_LWLOCK_OFFSET NUM_INDIVIDUAL_LWLOCKS
|
|
|
|
#define LOCK_MANAGER_LWLOCK_OFFSET \
|
|
|
|
(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
|
2014-05-06 18:12:18 +02:00
|
|
|
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
|
2014-06-19 15:40:37 +02:00
|
|
|
(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
|
2014-01-27 17:07:44 +01:00
|
|
|
#define NUM_FIXED_LWLOCKS \
|
|
|
|
(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
|
2001-09-29 06:02:27 +02:00
|
|
|
|
|
|
|
typedef enum LWLockMode
|
|
|
|
{
|
2001-10-28 07:26:15 +01:00
|
|
|
LW_EXCLUSIVE,
|
Make group commit more effective.
When a backend needs to flush the WAL, and someone else is already flushing
the WAL, wait until it releases the WALInsertLock and check if we still need
to do the flush or if the other backend already did the work for us, before
acquiring WALInsertLock. This helps group commit, because when the WAL flush
finishes, all the backends that were waiting for it can be woken up in one
go, and the can all concurrently observe that they're done, rather than
waking them up one by one in a cascading fashion.
This is based on a new LWLock function, LWLockWaitUntilFree(), which has
peculiar semantics. If the lock is immediately free, it grabs the lock and
returns true. If it's not free, it waits until it is released, but then
returns false without grabbing the lock. This is used in XLogFlush(), so
that when the lock is acquired, the backend flushes the WAL, but if it's
not, the backend first checks the current flush location before retrying.
Original patch and benchmarking by Peter Geoghegan and Simon Riggs, although
this patch as committed ended up being very different from that.
2012-01-30 15:40:58 +01:00
|
|
|
LW_SHARED,
|
2019-07-29 05:28:30 +02:00
|
|
|
LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwWaitMode,
|
2012-06-10 21:20:04 +02:00
|
|
|
* when waiting for lock to become free. Not
|
|
|
|
* to be used as LWLockAcquire argument */
|
2001-09-29 06:02:27 +02:00
|
|
|
} LWLockMode;
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef LOCK_DEBUG
|
|
|
|
extern bool Trace_lwlocks;
|
|
|
|
#endif
|
|
|
|
|
2014-03-21 15:06:08 +01:00
|
|
|
extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
|
2014-01-27 17:07:44 +01:00
|
|
|
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
|
|
|
|
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
|
|
|
|
extern void LWLockRelease(LWLock *lock);
|
2015-07-31 20:20:43 +02:00
|
|
|
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
|
2001-09-29 06:02:27 +02:00
|
|
|
extern void LWLockReleaseAll(void);
|
2014-01-27 17:07:44 +01:00
|
|
|
extern bool LWLockHeldByMe(LWLock *lock);
|
2016-09-05 11:38:08 +02:00
|
|
|
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
|
2001-09-29 06:02:27 +02:00
|
|
|
|
2014-03-21 15:06:08 +01:00
|
|
|
extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
|
|
|
|
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);
|
|
|
|
|
2005-08-21 01:26:37 +02:00
|
|
|
extern Size LWLockShmemSize(void);
|
2001-09-29 06:02:27 +02:00
|
|
|
extern void CreateLWLocks(void);
|
2014-06-30 09:13:48 +02:00
|
|
|
extern void InitLWLockAccess(void);
|
2001-10-28 07:26:15 +01:00
|
|
|
|
2016-10-04 16:50:13 +02:00
|
|
|
extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);
|
2016-03-10 18:44:09 +01:00
|
|
|
|
2014-01-27 17:07:44 +01:00
|
|
|
/*
|
2016-02-04 22:43:04 +01:00
|
|
|
* Extensions (or core code) can obtain an LWLocks by calling
|
|
|
|
* RequestNamedLWLockTranche() during postmaster startup. Subsequently,
|
2016-02-05 13:56:59 +01:00
|
|
|
* call GetNamedLWLockTranche() to obtain a pointer to an array containing
|
|
|
|
* the number of LWLocks requested.
|
2014-01-27 17:07:44 +01:00
|
|
|
*/
|
2016-02-04 22:43:04 +01:00
|
|
|
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
|
|
|
|
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
|
|
|
|
|
2014-01-27 17:07:44 +01:00
|
|
|
/*
|
|
|
|
* There is another, more flexible method of obtaining lwlocks. First, call
|
|
|
|
* LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
|
|
|
|
* a shared counter. Next, each individual process using the tranche should
|
2016-12-16 17:29:23 +01:00
|
|
|
* call LWLockRegisterTranche() to associate that tranche ID with a name.
|
|
|
|
* Finally, LWLockInitialize should be called just once per lwlock, passing
|
|
|
|
* the tranche ID as an argument.
|
2014-01-27 17:07:44 +01:00
|
|
|
*
|
|
|
|
* It may seem strange that each process using the tranche must register it
|
|
|
|
* separately, but dynamic shared memory segments aren't guaranteed to be
|
|
|
|
* mapped at the same address in all coordinating backends, so storing the
|
|
|
|
* registration in the main shared memory segment wouldn't work for that case.
|
|
|
|
*/
|
2014-05-06 18:12:18 +02:00
|
|
|
extern int LWLockNewTrancheId(void);
|
2017-10-31 15:34:31 +01:00
|
|
|
extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
|
2014-10-27 09:45:57 +01:00
|
|
|
extern void LWLockInitialize(LWLock *lock, int tranche_id);
|
2014-01-27 17:07:44 +01:00
|
|
|
|
2015-12-15 17:32:13 +01:00
|
|
|
/*
|
2016-12-16 17:29:23 +01:00
|
|
|
* Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
|
|
|
|
* we reserve additional tranche IDs for builtin tranches not included in
|
|
|
|
* the set of individual LWLocks. A call to LWLockNewTrancheId will never
|
|
|
|
* return a value less than LWTRANCHE_FIRST_USER_DEFINED.
|
2015-12-15 17:32:13 +01:00
|
|
|
*/
|
|
|
|
typedef enum BuiltinTrancheIds
|
|
|
|
{
|
2016-12-16 17:29:23 +01:00
|
|
|
LWTRANCHE_CLOG_BUFFERS = NUM_INDIVIDUAL_LWLOCKS,
|
2016-02-02 12:42:14 +01:00
|
|
|
LWTRANCHE_COMMITTS_BUFFERS,
|
|
|
|
LWTRANCHE_SUBTRANS_BUFFERS,
|
|
|
|
LWTRANCHE_MXACTOFFSET_BUFFERS,
|
|
|
|
LWTRANCHE_MXACTMEMBER_BUFFERS,
|
|
|
|
LWTRANCHE_ASYNC_BUFFERS,
|
|
|
|
LWTRANCHE_OLDSERXID_BUFFERS,
|
2015-12-15 17:32:13 +01:00
|
|
|
LWTRANCHE_WAL_INSERT,
|
2015-12-15 19:32:54 +01:00
|
|
|
LWTRANCHE_BUFFER_CONTENT,
|
|
|
|
LWTRANCHE_BUFFER_IO_IN_PROGRESS,
|
2016-02-02 12:42:14 +01:00
|
|
|
LWTRANCHE_REPLICATION_ORIGIN,
|
2016-01-29 15:44:29 +01:00
|
|
|
LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
|
2016-01-29 14:10:47 +01:00
|
|
|
LWTRANCHE_PROC,
|
2016-02-11 20:07:33 +01:00
|
|
|
LWTRANCHE_BUFFER_MAPPING,
|
|
|
|
LWTRANCHE_LOCK_MANAGER,
|
|
|
|
LWTRANCHE_PREDICATE_LOCK_MANAGER,
|
Add parallel-aware hash joins.
Introduce parallel-aware hash joins that appear in EXPLAIN plans as Parallel
Hash Join with Parallel Hash. While hash joins could already appear in
parallel queries, they were previously always parallel-oblivious and had a
partial subplan only on the outer side, meaning that the work of the inner
subplan was duplicated in every worker.
After this commit, the planner will consider using a partial subplan on the
inner side too, using the Parallel Hash node to divide the work over the
available CPU cores and combine its results in shared memory. If the join
needs to be split into multiple batches in order to respect work_mem, then
workers process different batches as much as possible and then work together
on the remaining batches.
The advantages of a parallel-aware hash join over a parallel-oblivious hash
join used in a parallel query are that it:
* avoids wasting memory on duplicated hash tables
* avoids wasting disk space on duplicated batch files
* divides the work of building the hash table over the CPUs
One disadvantage is that there is some communication between the participating
CPUs which might outweigh the benefits of parallelism in the case of small
hash tables. This is avoided by the planner's existing reluctance to supply
partial plans for small scans, but it may be necessary to estimate
synchronization costs in future if that situation changes. Another is that
outer batch 0 must be written to disk if multiple batches are required.
A potential future advantage of parallel-aware hash joins is that right and
full outer joins could be supported, since there is a single set of matched
bits for each hashtable, but that is not yet implemented.
A new GUC enable_parallel_hash is defined to control the feature, defaulting
to on.
Author: Thomas Munro
Reviewed-By: Andres Freund, Robert Haas
Tested-By: Rafia Sabih, Prabhat Sahu
Discussion:
https://postgr.es/m/CAEepm=2W=cOkiZxcg6qiFQP-dHUe09aqTrEMM7yJDrHMhDv_RA@mail.gmail.com
https://postgr.es/m/CAEepm=37HKyJ4U6XOLi=JgfSHM3o6B-GaeO-6hkOmneTDkH+Uw@mail.gmail.com
2017-12-21 08:39:21 +01:00
|
|
|
LWTRANCHE_PARALLEL_HASH_JOIN,
|
2016-12-19 22:47:15 +01:00
|
|
|
LWTRANCHE_PARALLEL_QUERY_DSA,
|
2017-09-15 04:59:21 +02:00
|
|
|
LWTRANCHE_SESSION_DSA,
|
|
|
|
LWTRANCHE_SESSION_RECORD_TABLE,
|
|
|
|
LWTRANCHE_SESSION_TYPMOD_TABLE,
|
2017-12-18 23:23:19 +01:00
|
|
|
LWTRANCHE_SHARED_TUPLESTORE,
|
2017-03-08 14:02:03 +01:00
|
|
|
LWTRANCHE_TBM,
|
Support Parallel Append plan nodes.
When we create an Append node, we can spread out the workers over the
subplans instead of piling on to each subplan one at a time, which
should typically be a bit more efficient, both because the startup
cost of any plan executed entirely by one worker is paid only once and
also because of reduced contention. We can also construct Append
plans using a mix of partial and non-partial subplans, which may allow
for parallelism in places that otherwise couldn't support it.
Unfortunately, this patch doesn't handle the important case of
parallelizing UNION ALL by running each branch in a separate worker;
the executor infrastructure is added here, but more planner work is
needed.
Amit Khandekar, Robert Haas, Amul Sul, reviewed and tested by
Ashutosh Bapat, Amit Langote, Rafia Sabih, Amit Kapila, and
Rajkumar Raghuwanshi.
Discussion: http://postgr.es/m/CAJ3gD9dy0K_E8r727heqXoBmWZ83HwLFwdcaSSmBQ1+S+vRuUQ@mail.gmail.com
2017-12-05 23:28:39 +01:00
|
|
|
LWTRANCHE_PARALLEL_APPEND,
|
Enable parallel query with SERIALIZABLE isolation.
Previously, the SERIALIZABLE isolation level prevented parallel query
from being used. Allow the two features to be used together by
sharing the leader's SERIALIZABLEXACT with parallel workers.
An extra per-SERIALIZABLEXACT LWLock is introduced to make it safe to
share, and new logic is introduced to coordinate the early release
of the SERIALIZABLEXACT required for the SXACT_FLAG_RO_SAFE
optimization, as follows:
The first backend to observe the SXACT_FLAG_RO_SAFE flag (set by
some other transaction) will 'partially release' the SERIALIZABLEXACT,
meaning that the conflicts and locks it holds are released, but the
SERIALIZABLEXACT itself will remain active because other backends
might still have a pointer to it.
Whenever any backend notices the SXACT_FLAG_RO_SAFE flag, it clears
its own MySerializableXact variable and frees local resources so that
it can skip SSI checks for the rest of the transaction. In the
special case of the leader process, it transfers the SERIALIZABLEXACT
to a new variable SavedSerializableXact, so that it can be completely
released at the end of the transaction after all workers have exited.
Remove the serializable_okay flag added to CreateParallelContext() by
commit 9da0cc35, because it's now redundant.
Author: Thomas Munro
Reviewed-by: Haribabu Kommi, Robert Haas, Masahiko Sawada, Kevin Grittner
Discussion: https://postgr.es/m/CAEepm=0gXGYhtrVDWOTHS8SQQy_=S9xo+8oCxGLWZAOoeJ=yzQ@mail.gmail.com
2019-03-15 04:23:46 +01:00
|
|
|
LWTRANCHE_SXACT,
|
2015-12-15 17:32:13 +01:00
|
|
|
LWTRANCHE_FIRST_USER_DEFINED
|
2017-06-21 20:39:04 +02:00
|
|
|
} BuiltinTrancheIds;
|
2015-12-15 17:32:13 +01:00
|
|
|
|
2014-01-27 17:07:44 +01:00
|
|
|
/*
|
|
|
|
* Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
|
|
|
|
* to LWLocks. New code should instead use LWLock *. However, for the
|
|
|
|
* convenience of third-party code, we include the following typedef.
|
|
|
|
*/
|
|
|
|
typedef LWLock *LWLockId;
|
2006-08-01 21:03:11 +02:00
|
|
|
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
#endif /* LWLOCK_H */
|