2014-01-14 18:23:22 +01:00
|
|
|
/*-------------------------------------------------------------------------
 *
 * shm_mq.h
 *	  single-reader, single-writer shared memory message queue
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/shm_mq.h
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
#ifndef SHM_MQ_H
|
|
|
|
#define SHM_MQ_H
|
|
|
|
|
|
|
|
#include "postmaster/bgworker.h"
|
|
|
|
#include "storage/dsm.h"
|
|
|
|
#include "storage/proc.h"
|
|
|
|
|
|
|
|
/*
 * The queue itself, in shared memory.  Opaque: the struct definition is not
 * exposed by this header; callers manipulate queues only through the
 * functions declared below.
 */
struct shm_mq;
typedef struct shm_mq shm_mq;

/*
 * Backend-private state for one attachment to a queue.  Also opaque;
 * obtained from shm_mq_attach() and released via shm_mq_detach().
 */
struct shm_mq_handle;
typedef struct shm_mq_handle shm_mq_handle;
|
|
|
|
|
2014-10-08 20:35:43 +02:00
|
|
|
/*
 * Descriptors for a single write spanning multiple locations.
 *
 * An array of these is passed to shm_mq_sendv() so that one logical message
 * can be gathered from several discontiguous buffers.
 */
typedef struct
{
	const char *data;			/* start of this chunk of the message */
	Size		len;			/* length of this chunk, in bytes */
} shm_mq_iovec;
|
|
|
|
|
2014-01-14 18:23:22 +01:00
|
|
|
/*
 * Possible results of a send or receive operation.
 *
 * NOTE(review): SHM_MQ_WOULD_BLOCK presumably arises only when the caller
 * passed nowait = true to shm_mq_send/sendv/receive — confirm in shm_mq.c.
 */
typedef enum
{
	SHM_MQ_SUCCESS,				/* Sent or received a message. */
	SHM_MQ_WOULD_BLOCK,			/* Not completed; retry later. */
	SHM_MQ_DETACHED				/* Other process has detached queue. */
} shm_mq_result;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Primitives to create a queue and set the sender and receiver.
|
|
|
|
*
|
|
|
|
* Both the sender and the receiver must be set before any messages are read
|
|
|
|
* or written, but they need not be set by the same process. Each must be
|
|
|
|
* set exactly once.
|
|
|
|
*/
|
|
|
|
extern shm_mq *shm_mq_create(void *address, Size size);
|
|
|
|
extern void shm_mq_set_receiver(shm_mq *mq, PGPROC *);
|
|
|
|
extern void shm_mq_set_sender(shm_mq *mq, PGPROC *);
|
|
|
|
|
|
|
|
/* Accessor methods for sender and receiver. */
|
|
|
|
extern PGPROC *shm_mq_get_receiver(shm_mq *);
|
|
|
|
extern PGPROC *shm_mq_get_sender(shm_mq *);
|
|
|
|
|
|
|
|
/*
 * Set up backend-local queue state.  'seg' is the dynamic shared memory
 * segment containing the queue; 'handle', if not NULL, identifies a
 * background worker associated with the other end of the queue.
 */
extern shm_mq_handle *shm_mq_attach(shm_mq *mq, dsm_segment *seg,
									BackgroundWorkerHandle *handle);
|
|
|
|
|
2014-10-08 20:35:43 +02:00
|
|
|
/* Associate worker handle with shm_mq. */
|
|
|
|
extern void shm_mq_set_handle(shm_mq_handle *, BackgroundWorkerHandle *);
|
|
|
|
|
Clean up shm_mq cleanup.
The logic around shm_mq_detach was a few bricks shy of a load, because
(contrary to the comments for shm_mq_attach) all it did was update the
shared shm_mq state. That left us leaking a bit of process-local
memory, but much worse, the on_dsm_detach callback for shm_mq_detach
was still armed. That means that whenever we ultimately detach from
the DSM segment, we'd run shm_mq_detach again for already-detached,
possibly long-dead queues. This accidentally fails to fail today,
because we only ever re-use a shm_mq's memory for another shm_mq, and
multiple detach attempts on the last such shm_mq are fairly harmless.
But it's gonna bite us someday, so let's clean it up.
To do that, change shm_mq_detach's API so it takes a shm_mq_handle
not the underlying shm_mq. This makes the callers simpler in most
cases anyway. Also fix a few places in parallel.c that were just
pfree'ing the handle structs rather than doing proper cleanup.
Back-patch to v10 because of the risk that the revenant shm_mq_detach
callbacks would cause a live bug sometime. Since this is an API
change, it's too late to do it in 9.6. (We could make a variant
patch that preserves API, but I'm not excited enough to do that.)
Discussion: https://postgr.es/m/8670.1504192177@sss.pgh.pa.us
2017-08-31 21:10:24 +02:00
|
|
|
/*
 * Break connection, release handle resources.  Per the accompanying commit
 * note, this performs full cleanup of backend-local state (including the
 * on_dsm_detach callback), not just an update of the shared queue state.
 */
extern void shm_mq_detach(shm_mq_handle *mqh);
|
2014-01-14 18:23:22 +01:00
|
|
|
|
Glue layer to connect the executor to the shm_mq mechanism.
The shm_mq mechanism was built to send error (and notice) messages and
tuples between backends. However, shm_mq itself only deals in raw
bytes. Since commit 2bd9e412f92bc6a68f3e8bcb18e04955cc35001d, we have
had infrastructure for one message to redirect protocol messages to a
queue and for another backend to parse them and do useful things with
them. This commit introduces a somewhat analogous facility for tuples
by adding a new type of DestReceiver, DestTupleQueue, which writes
each tuple generated by a query into a shm_mq, and a new
TupleQueueFunnel facility which reads raw tuples out of the queue and
reconstructs the HeapTuple format expected by the executor.
The TupleQueueFunnel abstraction supports reading from multiple tuple
streams at the same time, but only in round-robin fashion. Someone
could imaginably want other policies, but this should be good enough
to meet our short-term needs related to parallel query, and we can
always extend it later.
This also makes one minor addition to the shm_mq API that didn't
seem worth breaking out as a separate patch.
Extracted from Amit Kapila's parallel sequential scan patch. This
code was originally written by me, and then it was revised by Amit,
and then it was revised some more by me.
2015-09-19 03:10:08 +02:00
|
|
|
/* Get the underlying shared-memory shm_mq from a backend-local handle. */
extern shm_mq *shm_mq_get_queue(shm_mq_handle *mqh);
|
|
|
|
|
2014-01-14 18:23:22 +01:00
|
|
|
/*
 * Send or receive messages.
 *
 * shm_mq_send() writes a single message of 'nbytes' bytes; shm_mq_sendv()
 * gathers one message from the 'iovcnt' chunks described by 'iov'.
 * shm_mq_receive() returns the next message's length and a pointer to its
 * data via *nbytesp and *datap.  With nowait = true the call does not block;
 * see shm_mq_result for the possible outcomes.
 */
extern shm_mq_result shm_mq_send(shm_mq_handle *mqh,
								 Size nbytes, const void *data, bool nowait);
extern shm_mq_result shm_mq_sendv(shm_mq_handle *mqh,
								  shm_mq_iovec *iov, int iovcnt, bool nowait);
extern shm_mq_result shm_mq_receive(shm_mq_handle *mqh,
									Size *nbytesp, void **datap, bool nowait);
|
2014-01-14 18:23:22 +01:00
|
|
|
|
|
|
|
/* Wait for our counterparty to attach to the queue. */
extern shm_mq_result shm_mq_wait_for_attach(shm_mq_handle *mqh);
|
|
|
|
|
|
|
|
/* Smallest possible queue (minimum legal 'size' for shm_mq_create). */
extern PGDLLIMPORT const Size shm_mq_minimum_size;
|
2014-01-14 18:23:22 +01:00
|
|
|
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
#endif /* SHM_MQ_H */
|