/*-------------------------------------------------------------------------
*
* parallel.c
* Infrastructure for launching parallel workers
*
* Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/access/transam/parallel.c
*
*-------------------------------------------------------------------------
*/
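/*
 * A sketch of typical caller usage, showing how the pieces below fit
 * together (illustrative only: "my_worker_main" and "size_of_my_state" are
 * hypothetical caller-side names, and real callers typically stash extra
 * state in the table of contents between InitializeParallelDSM and
 * LaunchParallelWorkers):
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext(my_worker_main, nworkers);
 *		shm_toc_estimate_chunk(&pcxt->estimator, size_of_my_state);
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		... copy state into pcxt->toc via shm_toc_allocate/shm_toc_insert ...
 *		LaunchParallelWorkers(pcxt);
 *		... wait, e.g. with WaitForParallelWorkersToFinish(pcxt) ...
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */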
#include "postgres.h"
#include "access/parallel.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/namespace.h"
#include "commands/async.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "optimizer/planmain.h"
#include "storage/ipc.h"
#include "storage/sinval.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/snapmgr.h"
/*
* We don't want to waste a lot of memory on an error queue which, most of
* the time, will process only a handful of small messages. However, it is
* desirable to make it large enough that a typical ErrorResponse can be sent
* without blocking. That way, a worker that errors out can write the whole
* message into the queue and terminate without waiting for the user backend.
*/
#define PARALLEL_ERROR_QUEUE_SIZE 16384
/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC 0x50477c7c
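/* (0x50477c7c spells "PG||" in ASCII.) */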
/*
* Magic numbers for parallel state sharing. Higher-level code should use
* smaller values, leaving these very large ones for use by this module.
*/
#define PARALLEL_KEY_FIXED UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_EXTENSION_TRAMPOLINE UINT64CONST(0xFFFFFFFFFFFF0009)
/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
/* Fixed-size state that workers must restore. */
Oid database_id;
Oid authenticated_user_id;
Oid current_user_id;
Oid temp_namespace_id;
Oid temp_toast_namespace_id;
int sec_context;
PGPROC *parallel_master_pgproc;
pid_t parallel_master_pid;
BackendId parallel_master_backend_id;
/* Entrypoint for parallel workers. */
parallel_worker_main_type entrypoint;
/* Mutex protects remaining fields. */
slock_t mutex;
/* Maximum XactLastRecEnd of any worker. */
XLogRecPtr last_xlog_end;
} FixedParallelState;
/*
* Our parallel worker number. We initialize this to -1, meaning that we are
* not a parallel worker. In parallel workers, it will be set to a value >= 0
* and < the number of workers before any user code is invoked; each parallel
* worker will get a different parallel worker number.
*/
int ParallelWorkerNumber = -1;
/* Is there a parallel message pending which we need to receive? */
volatile bool ParallelMessagePending = false;
/* Are we initializing a parallel worker? */
bool InitializingParallelWorker = false;
/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;
/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
/* Private functions. */
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
static void ParallelWorkerMain(Datum main_arg);
static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
/*
* Establish a new parallel context. This should be done after entering
* parallel mode, and (unless there is an error) the context should be
* destroyed before exiting the current subtransaction.
*/
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
MemoryContext oldcontext;
ParallelContext *pcxt;
/* It is unsafe to create a parallel context if not in parallel mode. */
Assert(IsInParallelMode());
/* Number of workers should be non-negative. */
Assert(nworkers >= 0);
/*
* If dynamic shared memory is not available, we won't be able to use
* background workers.
*/
if (dynamic_shared_memory_type == DSM_IMPL_NONE)
nworkers = 0;
/*
* If we are running under serializable isolation, we can't use parallel
* workers, at least not until somebody enhances that mechanism to be
* parallel-aware.
*/
if (IsolationIsSerializable())
nworkers = 0;
/* We might be running in a short-lived memory context. */
oldcontext = MemoryContextSwitchTo(TopTransactionContext);
/* Initialize a new ParallelContext. */
pcxt = palloc0(sizeof(ParallelContext));
pcxt->subid = GetCurrentSubTransactionId();
pcxt->nworkers = nworkers;
pcxt->entrypoint = entrypoint;
pcxt->error_context_stack = error_context_stack;
shm_toc_initialize_estimator(&pcxt->estimator);
dlist_push_head(&pcxt_list, &pcxt->node);
/* Restore previous memory context. */
MemoryContextSwitchTo(oldcontext);
return pcxt;
}
/*
* Establish a new parallel context that calls a function provided by an
* extension. This works around the fact that the library might get mapped
* at a different address in each backend.
*/
ParallelContext *
CreateParallelContextForExternalFunction(char *library_name,
char *function_name,
int nworkers)
{
MemoryContext oldcontext;
ParallelContext *pcxt;
/* We might be running in a very short-lived memory context. */
oldcontext = MemoryContextSwitchTo(TopTransactionContext);
/* Create the context. */
pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
pcxt->library_name = pstrdup(library_name);
pcxt->function_name = pstrdup(function_name);
/* Restore previous memory context. */
MemoryContextSwitchTo(oldcontext);
return pcxt;
}
/*
* Establish the dynamic shared memory segment for a parallel context and
* copy state and other bookkeeping information that will be needed by
* parallel workers into it.
*/
void
InitializeParallelDSM(ParallelContext *pcxt)
{
MemoryContext oldcontext;
Size library_len = 0;
Size guc_len = 0;
Size combocidlen = 0;
Size tsnaplen = 0;
Size asnaplen = 0;
Size tstatelen = 0;
Size segsize = 0;
int i;
FixedParallelState *fps;
Snapshot transaction_snapshot = GetTransactionSnapshot();
Snapshot active_snapshot = GetActiveSnapshot();
/* We might be running in a very short-lived memory context. */
oldcontext = MemoryContextSwitchTo(TopTransactionContext);
/* Allow space to store the fixed-size parallel state. */
shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
* Normally, the user will have requested at least one worker process, but
* if by chance they have not, we can skip a bunch of things here.
*/
if (pcxt->nworkers > 0)
{
/* Estimate space for various kinds of state sharing. */
library_len = EstimateLibraryStateSpace();
shm_toc_estimate_chunk(&pcxt->estimator, library_len);
guc_len = EstimateGUCStateSpace();
shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
combocidlen = EstimateComboCIDStateSpace();
shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
asnaplen = EstimateSnapshotSpace(active_snapshot);
shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
tstatelen = EstimateTransactionStateSpace();
shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
/* If you add more chunks here, you probably need to add keys. */
shm_toc_estimate_keys(&pcxt->estimator, 6);
/* Estimate space needed for error queues. */
StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
PARALLEL_ERROR_QUEUE_SIZE,
"parallel error queue size not buffer-aligned");
shm_toc_estimate_chunk(&pcxt->estimator,
mul_size(PARALLEL_ERROR_QUEUE_SIZE,
pcxt->nworkers));
shm_toc_estimate_keys(&pcxt->estimator, 1);
/* Estimate how much we'll need for extension entrypoint info. */
if (pcxt->library_name != NULL)
{
Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
Assert(pcxt->function_name != NULL);
shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
+ strlen(pcxt->function_name) + 2);
shm_toc_estimate_keys(&pcxt->estimator, 1);
}
}
/*
* Create DSM and initialize with new table of contents. But if the user
* didn't request any workers, then don't bother creating a dynamic shared
* memory segment; instead, just use backend-private memory.
*
* Also, if we can't create a dynamic shared memory segment because the
* maximum number of segments has already been created, then fall back to
* backend-private memory, and plan not to use any workers. We hope this
* won't happen very often, but it's better to abandon the use of
* parallelism than to fail outright.
*/
segsize = shm_toc_estimate(&pcxt->estimator);
if (pcxt->nworkers > 0)
pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
if (pcxt->seg != NULL)
pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
dsm_segment_address(pcxt->seg),
segsize);
else
{
pcxt->nworkers = 0;
pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
segsize);
}
/* Initialize fixed-size state in shared memory. */
fps = (FixedParallelState *)
shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
fps->database_id = MyDatabaseId;
fps->authenticated_user_id = GetAuthenticatedUserId();
GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
GetTempNamespaceState(&fps->temp_namespace_id,
&fps->temp_toast_namespace_id);
fps->parallel_master_pgproc = MyProc;
fps->parallel_master_pid = MyProcPid;
fps->parallel_master_backend_id = MyBackendId;
fps->entrypoint = pcxt->entrypoint;
SpinLockInit(&fps->mutex);
fps->last_xlog_end = 0;
shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
/* We can skip the rest of this if we're not budgeting for any workers. */
if (pcxt->nworkers > 0)
{
char *libraryspace;
char *gucspace;
char *combocidspace;
char *tsnapspace;
char *asnapspace;
char *tstatespace;
char *error_queue_space;
/* Serialize shared libraries we have loaded. */
libraryspace = shm_toc_allocate(pcxt->toc, library_len);
SerializeLibraryState(library_len, libraryspace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
/* Serialize GUC settings. */
gucspace = shm_toc_allocate(pcxt->toc, guc_len);
SerializeGUCState(guc_len, gucspace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
/* Serialize combo CID state. */
combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
SerializeComboCIDState(combocidlen, combocidspace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/* Serialize transaction snapshot and active snapshot. */
tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
SerializeSnapshot(transaction_snapshot, tsnapspace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
tsnapspace);
asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
SerializeSnapshot(active_snapshot, asnapspace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
/* Serialize transaction state. */
tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
SerializeTransactionState(tstatelen, tstatespace);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);
/* Allocate space for worker information. */
pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
/*
* Establish error queues in dynamic shared memory.
*
* These queues should be used only for transmitting ErrorResponse,
* NoticeResponse, and NotifyResponse protocol messages. Tuple data
* should be transmitted via separate (possibly larger?) queues.
*/
error_queue_space =
shm_toc_allocate(pcxt->toc,
mul_size(PARALLEL_ERROR_QUEUE_SIZE,
pcxt->nworkers));
for (i = 0; i < pcxt->nworkers; ++i)
{
char *start;
shm_mq *mq;
start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_receiver(mq, MyProc);
pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
}
shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
/* Serialize extension entrypoint information. */
if (pcxt->library_name != NULL)
{
Size lnamelen = strlen(pcxt->library_name);
char *extensionstate;
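/* Layout: library name, terminating NUL, function name, terminating NUL. */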
extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
+ strlen(pcxt->function_name) + 2);
strcpy(extensionstate, pcxt->library_name);
strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
extensionstate);
}
}
/* Restore previous memory context. */
MemoryContextSwitchTo(oldcontext);
}
/*
* Reinitialize the dynamic shared memory segment for a parallel context so
* that we can launch workers for it again.
*/
void
ReinitializeParallelDSM(ParallelContext *pcxt)
{
FixedParallelState *fps;
char *error_queue_space;
int i;
/* Wait for any old workers to exit. */
if (pcxt->nworkers_launched > 0)
{
WaitForParallelWorkersToFinish(pcxt);
WaitForParallelWorkersToExit(pcxt);
pcxt->nworkers_launched = 0;
}
/* Reset a few bits of fixed parallel state to a clean state. */
fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
fps->last_xlog_end = 0;
/* Recreate error queues. */
error_queue_space =
shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
for (i = 0; i < pcxt->nworkers; ++i)
{
char *start;
shm_mq *mq;
start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_receiver(mq, MyProc);
pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
}
}
/*
* Launch parallel workers.
*/
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
MemoryContext oldcontext;
BackgroundWorker worker;
int i;
bool any_registrations_failed = false;
/* Skip this if we have no workers. */
if (pcxt->nworkers == 0)
return;
/* We need to be a lock group leader. */
BecomeLockGroupLeader();
/* If we do have workers, we'd better have a DSM segment. */
Assert(pcxt->seg != NULL);
/* We might be running in a short-lived memory context. */
oldcontext = MemoryContextSwitchTo(TopTransactionContext);
/* Configure a worker. */
snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
MyProcPid);
worker.bgw_flags =
BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
worker.bgw_start_time = BgWorkerStart_ConsistentState;
worker.bgw_restart_time = BGW_NEVER_RESTART;
worker.bgw_main = ParallelWorkerMain;
worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
worker.bgw_notify_pid = MyProcPid;
memset(&worker.bgw_extra, 0, BGW_EXTRALEN);
/*
* Start workers.
*
* The caller must be able to tolerate ending up with fewer workers than
* expected, so there is no need to throw an error here if registration
* fails. It wouldn't help much anyway, because registering the worker in
* no way guarantees that it will start up and initialize successfully.
*/
for (i = 0; i < pcxt->nworkers; ++i)
{
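/* Pass this worker's number via bgw_extra; read back in ParallelWorkerMain. */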
memcpy(worker.bgw_extra, &i, sizeof(int));
if (!any_registrations_failed &&
RegisterDynamicBackgroundWorker(&worker,
&pcxt->worker[i].bgwhandle))
{
shm_mq_set_handle(pcxt->worker[i].error_mqh,
pcxt->worker[i].bgwhandle);
pcxt->nworkers_launched++;
}
else
{
/*
* If we weren't able to register the worker, then we've bumped up
* against the max_worker_processes limit, and future
* registrations will probably fail too, so arrange to skip them.
* But we still have to execute this code for the remaining slots
* to make sure that we forget about the error queues we budgeted
* for those workers. Otherwise, we'll wait for them to start,
* but they never will.
*/
any_registrations_failed = true;
pcxt->worker[i].bgwhandle = NULL;
pfree(pcxt->worker[i].error_mqh);
pcxt->worker[i].error_mqh = NULL;
}
}
/* Restore previous memory context. */
MemoryContextSwitchTo(oldcontext);
}
/*
* Wait for all workers to finish computing.
*
* Even if the parallel operation seems to have completed successfully, it's
* important to call this function afterwards. We must not miss any errors
* the workers may have thrown during the parallel operation, or any that they
* may yet throw while shutting down.
*
* Also, we want to update our notion of XactLastRecEnd based on worker
* feedback.
*/
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
for (;;)
{
bool anyone_alive = false;
int i;
/*
* This will process any parallel messages that are pending, which may
* change the outcome of the loop that follows. It may also throw an
* error propagated from a worker.
*/
CHECK_FOR_INTERRUPTS();
for (i = 0; i < pcxt->nworkers_launched; ++i)
{
if (pcxt->worker[i].error_mqh != NULL)
{
anyone_alive = true;
break;
}
}
if (!anyone_alive)
break;
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
ResetLatch(&MyProc->procLatch);
}
if (pcxt->toc != NULL)
{
FixedParallelState *fps;
fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
if (fps->last_xlog_end > XactLastRecEnd)
XactLastRecEnd = fps->last_xlog_end;
}
}
/*
* Wait for all workers to exit.
*
* This function ensures that the workers have completely shut down. The
* difference between WaitForParallelWorkersToFinish and this function is
* that the former only ensures that the last message sent by a worker
* backend has been received by the master backend, whereas this one ensures
* complete shutdown.
*/
static void
WaitForParallelWorkersToExit(ParallelContext *pcxt)
{
int i;
/* Wait until the workers actually die. */
for (i = 0; i < pcxt->nworkers_launched; ++i)
{
BgwHandleStatus status;
if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
continue;
status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
/*
* If the postmaster kicked the bucket, we have no chance of cleaning
* up safely -- we won't be able to tell when our workers are actually
* dead. This doesn't necessitate a PANIC since they will all abort
* eventually, but we can't safely continue this session.
*/
if (status == BGWH_POSTMASTER_DIED)
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("postmaster exited during a parallel transaction")));
/* Release memory. */
pfree(pcxt->worker[i].bgwhandle);
pcxt->worker[i].bgwhandle = NULL;
}
}
/*
* Destroy a parallel context.
*
* If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
* first, before calling this function. When this function is invoked, any
* remaining workers are forcibly killed; the dynamic shared memory segment
* is unmapped; and we then wait (uninterruptibly) for the workers to exit.
*/
void
DestroyParallelContext(ParallelContext *pcxt)
{
int i;
/*
* Be careful about order of operations here! We remove the parallel
* context from the list before we do anything else; otherwise, if an
* error occurs during a subsequent step, we might try to nuke it again
* from AtEOXact_Parallel or AtEOSubXact_Parallel.
*/
dlist_delete(&pcxt->node);
/* Kill each worker in turn, and forget their error queues. */
if (pcxt->worker != NULL)
{
for (i = 0; i < pcxt->nworkers_launched; ++i)
{
if (pcxt->worker[i].error_mqh != NULL)
{
TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
pfree(pcxt->worker[i].error_mqh);
pcxt->worker[i].error_mqh = NULL;
}
}
}
/*
* If we have allocated a shared memory segment, detach it. This will
* implicitly detach the error queues, and any other shared memory queues,
* stored there.
*/
if (pcxt->seg != NULL)
{
dsm_detach(pcxt->seg);
pcxt->seg = NULL;
}
/*
* If this parallel context is actually in backend-private memory rather
* than shared memory, free that memory instead.
*/
if (pcxt->private_memory != NULL)
{
pfree(pcxt->private_memory);
pcxt->private_memory = NULL;
}
/*
* We can't finish transaction commit or abort until all of the workers
* have exited. This means, in particular, that we can't respond to
* interrupts at this stage.
*/
HOLD_INTERRUPTS();
WaitForParallelWorkersToExit(pcxt);
RESUME_INTERRUPTS();
/* Free the worker array itself. */
if (pcxt->worker != NULL)
{
pfree(pcxt->worker);
pcxt->worker = NULL;
}
/* Free memory. */
pfree(pcxt);
}
/*
* Are there any parallel contexts currently active?
*/
bool
ParallelContextActive(void)
{
return !dlist_is_empty(&pcxt_list);
}
/*
* Handle receipt of an interrupt indicating a parallel worker message.
*
* Note: this is called within a signal handler! All we can do is set
* a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
* HandleParallelMessages().
*/
void
HandleParallelMessageInterrupt(void)
{
InterruptPending = true;
ParallelMessagePending = true;
SetLatch(MyLatch);
}
/*
* Handle any queued protocol messages received from parallel workers.
*/
void
HandleParallelMessages(void)
{
dlist_iter iter;
MemoryContext oldcontext;
static MemoryContext hpm_context = NULL;
/*
* This is invoked from ProcessInterrupts(), and since some of the
* functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
* for recursive calls if more signals are received while this runs. It's
* unclear that recursive entry would be safe, and it doesn't seem useful
* even if it is safe, so let's block interrupts until done.
*/
HOLD_INTERRUPTS();
/*
* Moreover, CurrentMemoryContext might be pointing almost anywhere. We
* don't want to risk leaking data into long-lived contexts, so let's do
* our work here in a private context that we can reset on each use.
*/
if (hpm_context == NULL) /* first time through? */
hpm_context = AllocSetContextCreate(TopMemoryContext,
"HandleParallelMessages context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
else
MemoryContextReset(hpm_context);
oldcontext = MemoryContextSwitchTo(hpm_context);
/* OK to process messages. Reset the flag saying there are more to do. */
ParallelMessagePending = false;
dlist_foreach(iter, &pcxt_list)
{
ParallelContext *pcxt;
int i;
pcxt = dlist_container(ParallelContext, node, iter.cur);
if (pcxt->worker == NULL)
continue;
for (i = 0; i < pcxt->nworkers_launched; ++i)
{
/*
* Read as many messages as we can from each worker, but stop when
* either (1) the worker's error queue goes away, which can happen
* if we receive a Terminate message from the worker; or (2) no
* more messages can be read from the worker without blocking.
*/
while (pcxt->worker[i].error_mqh != NULL)
{
shm_mq_result res;
Size nbytes;
void *data;
res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
&data, true);
if (res == SHM_MQ_WOULD_BLOCK)
break;
else if (res == SHM_MQ_SUCCESS)
{
StringInfoData msg;
initStringInfo(&msg);
appendBinaryStringInfo(&msg, data, nbytes);
HandleParallelMessage(pcxt, i, &msg);
pfree(msg.data);
}
else
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("lost connection to parallel worker")));
}
}
}
MemoryContextSwitchTo(oldcontext);
/* Might as well clear the context on our way out */
MemoryContextReset(hpm_context);
RESUME_INTERRUPTS();
}
/*
* Handle a single protocol message received from a single parallel worker.
*/
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
char msgtype;
msgtype = pq_getmsgbyte(msg);
switch (msgtype)
{
case 'K': /* BackendKeyData */
{
int32 pid = pq_getmsgint(msg, 4);
(void) pq_getmsgint(msg, 4); /* discard cancel key */
(void) pq_getmsgend(msg);
pcxt->worker[i].pid = pid;
break;
}
case 'E': /* ErrorResponse */
case 'N': /* NoticeResponse */
{
ErrorData edata;
ErrorContextCallback *save_error_context_stack;
/* Parse ErrorResponse or NoticeResponse. */
pq_parse_errornotice(msg, &edata);
/* Death of a worker isn't enough justification for suicide. */
edata.elevel = Min(edata.elevel, ERROR);
/*
* If desired, add a context line to show that this is a
* message propagated from a parallel worker. Otherwise, it
* can sometimes be confusing to understand what actually
* happened. (We don't do this in FORCE_PARALLEL_REGRESS mode
* because it causes test-result instability depending on
* whether a parallel worker is actually used or not.)
*/
if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
{
if (edata.context)
edata.context = psprintf("%s\n%s", edata.context,
_("parallel worker"));
else
edata.context = pstrdup(_("parallel worker"));
}
/*
* Context beyond that should use the error context callbacks
* that were in effect when the ParallelContext was created,
* not the current ones.
*/
save_error_context_stack = error_context_stack;
error_context_stack = pcxt->error_context_stack;
/* Rethrow error or print notice. */
ThrowErrorData(&edata);
/* Not an error, so restore previous context stack. */
error_context_stack = save_error_context_stack;
break;
}
case 'A': /* NotifyResponse */
{
/* Propagate NotifyResponse. */
int32 pid;
const char *channel;
const char *payload;
pid = pq_getmsgint(msg, 4);
channel = pq_getmsgrawstring(msg);
payload = pq_getmsgrawstring(msg);
pq_endmessage(msg);
NotifyMyFrontEnd(channel, payload, pid);
break;
}
case 'X': /* Terminate, indicating clean exit */
{
pfree(pcxt->worker[i].error_mqh);
pcxt->worker[i].error_mqh = NULL;
break;
}
default:
{
elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
msgtype, msg->len);
}
}
}
/*
* End-of-subtransaction cleanup for parallel contexts.
*
* Currently, it's forbidden to enter or leave a subtransaction while
* parallel mode is in effect, so we could just blow away everything. But
* we may want to relax that restriction in the future, so this code
* contemplates that there may be multiple subtransaction IDs in pcxt_list.
*/
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
while (!dlist_is_empty(&pcxt_list))
{
ParallelContext *pcxt;
pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
if (pcxt->subid != mySubId)
break;
if (isCommit)
elog(WARNING, "leaked parallel context");
DestroyParallelContext(pcxt);
}
}
/*
* End-of-transaction cleanup for parallel contexts.
*/
void
AtEOXact_Parallel(bool isCommit)
{
while (!dlist_is_empty(&pcxt_list))
{
ParallelContext *pcxt;
pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
if (isCommit)
elog(WARNING, "leaked parallel context");
DestroyParallelContext(pcxt);
}
}
/*
* Main entrypoint for parallel workers.
*/
static void
ParallelWorkerMain(Datum main_arg)
{
dsm_segment *seg;
shm_toc *toc;
FixedParallelState *fps;
char *error_queue_space;
shm_mq *mq;
shm_mq_handle *mqh;
char *libraryspace;
char *gucspace;
char *combocidspace;
char *tsnapspace;
char *asnapspace;
char *tstatespace;
StringInfoData msgbuf;
/* Set flag to indicate that we're initializing a parallel worker. */
InitializingParallelWorker = true;
/* Establish signal handlers. */
pqsignal(SIGTERM, die);
BackgroundWorkerUnblockSignals();
/* Determine and set our parallel worker number. */
Assert(ParallelWorkerNumber == -1);
memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
/* Set up a memory context and resource owner. */
Assert(CurrentResourceOwner == NULL);
CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
"parallel worker",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* Now that we have a resource owner, we can attach to the dynamic shared
* memory segment and read the table of contents.
*/
seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not map dynamic shared memory segment")));
toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
Assert(fps != NULL);
MyFixedParallelState = fps;
/*
* Now that we have a worker number, we can find and attach to the error
* queue provided for us. That's good, because until we do that, any
* errors that happen here will not be reported back to the process that
* requested that this worker be launched.
*/
error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
mq = (shm_mq *) (error_queue_space +
ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_sender(mq, MyProc);
mqh = shm_mq_attach(mq, seg, NULL);
pq_redirect_to_shm_mq(seg, mqh);
pq_set_parallel_master(fps->parallel_master_pid,
fps->parallel_master_backend_id);
/*
* Send a BackendKeyData message to the process that initiated parallelism
* so that it has access to our PID before it receives any other messages
* from us. Our cancel key is sent, too, since that's the way the
* protocol message is defined, but it won't actually be used for anything
* in this case.
*/
pq_beginmessage(&msgbuf, 'K');
pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
pq_endmessage(&msgbuf);
/*
* Hooray! Primary initialization is complete. Now, we need to set up our
* backend-local state to match the original backend.
*/
/*
* Join locking group. We must do this before anything that could try to
* acquire a heavyweight lock, because any heavyweight locks acquired to
* this point could block either directly against the parallel group
* leader or against some process which in turn waits for a lock that
* conflicts with the parallel group leader, causing an undetected
* deadlock. (If we can't join the lock group, the leader has gone away,
* so just exit quietly.)
*/
if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
fps->parallel_master_pid))
return;
/*
* Load libraries that were loaded by original backend. We want to do
* this before restoring GUCs, because the libraries might define custom
* variables.
*/
libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
Assert(libraryspace != NULL);
RestoreLibraryState(libraryspace);
/* Restore database connection. */
BackgroundWorkerInitializeConnectionByOid(fps->database_id,
fps->authenticated_user_id);
/*
* Set the client encoding to the database encoding, since that is what
* the leader will expect.
*/
SetClientEncoding(GetDatabaseEncoding());
/* Restore GUC values from launching backend. */
gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
Assert(gucspace != NULL);
StartTransactionCommand();
RestoreGUCState(gucspace);
CommitTransactionCommand();
/* Crank up a transaction state appropriate to a parallel worker. */
tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
StartParallelWorkerTransaction(tstatespace);
/* Restore combo CID state. */
combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
Assert(combocidspace != NULL);
RestoreComboCIDState(combocidspace);
/* Restore transaction snapshot. */
tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
Assert(tsnapspace != NULL);
RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
fps->parallel_master_pgproc);
/* Restore active snapshot. */
asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
Assert(asnapspace != NULL);
PushActiveSnapshot(RestoreSnapshot(asnapspace));
/*
* We've changed which tuples we can see, and must therefore invalidate
* system caches.
*/
InvalidateSystemCaches();
/* Restore user ID and security context. */
SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
/* Restore temp-namespace state to ensure search path matches leader's. */
SetTempNamespaceState(fps->temp_namespace_id,
fps->temp_toast_namespace_id);
/* Set ParallelMasterBackendId so we know how to address temp relations. */
ParallelMasterBackendId = fps->parallel_master_backend_id;
/*
* We've initialized all of our state now; nothing should change
* hereafter.
*/
InitializingParallelWorker = false;
EnterParallelMode();
/*
* Time to do the real work: invoke the caller-supplied code.
*
* If you get a crash at this line, see the comments for
* ParallelExtensionTrampoline.
*/
fps->entrypoint(seg, toc);
/* Must exit parallel mode to pop active snapshot. */
ExitParallelMode();
/* Must pop active snapshot so resowner.c doesn't complain. */
PopActiveSnapshot();
/* Shut down the parallel-worker transaction. */
EndParallelWorkerTransaction();
/* Report success. */
pq_putmessage('X', NULL, 0);
}
/*
* It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
* function living in a dynamically loaded module, because the module might
* not be loaded in every process, or might be loaded but not at the same
* address. To work around that problem, CreateParallelContextForExternalFunction()
* arranges to call this function rather than calling the extension-provided
* function directly; and this function then looks up the real entrypoint and
* calls it.
*/
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
char *extensionstate;
char *library_name;
char *function_name;
parallel_worker_main_type entrypt;
extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
Assert(extensionstate != NULL);
library_name = extensionstate;
function_name = extensionstate + strlen(library_name) + 1;
entrypt = (parallel_worker_main_type)
load_external_function(library_name, function_name, true, NULL);
entrypt(seg, toc);
}
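/*
 * For reference, an extension entrypoint reached via this trampoline has
 * the same shape as any other parallel entrypoint (a sketch; the names
 * "my_worker_main" and MY_STATE_KEY are hypothetical):
 *
 *		void
 *		my_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			char	   *mystate = shm_toc_lookup(toc, MY_STATE_KEY);
 *
 *			... do this worker's share of the work ...
 *		}
 *
 * launched with CreateParallelContextForExternalFunction("$libdir/mylib",
 * "my_worker_main", nworkers).
 */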
/*
* Update shared memory with the ending location of the last WAL record we
* wrote, if it's greater than the value already stored there.
*/
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
FixedParallelState *fps = MyFixedParallelState;
Assert(fps != NULL);
SpinLockAcquire(&fps->mutex);
if (fps->last_xlog_end < last_xlog_end)
fps->last_xlog_end = last_xlog_end;
SpinLockRelease(&fps->mutex);
}