Test code for shared memory message queue facility.

This code is intended as a demonstration of how the dynamic shared
memory and dynamic background worker facilities can be used to establish
a group of cooperating processes which can coordinate their activities
using the shared memory message queue facility.  By itself, the code
does nothing particularly interesting: it simply allows messages to
be passed through a loop of workers and back to the original process.
But it's a useful unit test, in addition to its demonstration value.
Robert Haas 2014-01-14 12:24:12 -05:00
parent ec9037df26
commit 4db3744f1f
11 changed files with 932 additions and 0 deletions

contrib/Makefile
@@ -51,6 +51,7 @@ SUBDIRS = \
 tablefunc \
 tcn \
 test_parser \
+test_shm_mq \
 tsearch2 \
 unaccent \
 vacuumlo \

contrib/test_shm_mq/.gitignore (new file)
@@ -0,0 +1,4 @@
# Generated subdirectories
/log/
/results/
/tmp_check/

contrib/test_shm_mq/Makefile (new file)
@@ -0,0 +1,20 @@
# contrib/test_shm_mq/Makefile
MODULE_big = test_shm_mq
OBJS = test.o setup.o worker.o
EXTENSION = test_shm_mq
DATA = test_shm_mq--1.0.sql
REGRESS = test_shm_mq
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
subdir = contrib/test_shm_mq
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif

contrib/test_shm_mq/expected/test_shm_mq.out (new file)
@@ -0,0 +1,18 @@
CREATE EXTENSION test_shm_mq;
--
-- These tests don't produce any interesting output. We're checking that
-- the operations complete without crashing or hanging and that none of their
-- internal sanity tests fail.
--
SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*96)::int), '') from generate_series(1,400)), 10000, 1);
 test_shm_mq 
-------------
 
(1 row)

SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*96)::int), '') from generate_series(1,270000)), 200, 3);
 test_shm_mq_pipelined 
-----------------------
 
(1 row)

contrib/test_shm_mq/setup.c (new file)
@@ -0,0 +1,323 @@
/*--------------------------------------------------------------------------
*
* setup.c
* Code to set up a dynamic shared memory segment and a specified
* number of background workers for shared memory message queue
* testing.
*
* Copyright (C) 2013, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/test_shm_mq/setup.c
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "miscadmin.h"
#include "postmaster/bgworker.h"
#include "storage/procsignal.h"
#include "storage/shm_toc.h"
#include "utils/memutils.h"
#include "test_shm_mq.h"
typedef struct
{
int nworkers;
BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
} worker_state;
static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
dsm_segment **segp,
test_shm_mq_header **hdrp,
shm_mq **outp, shm_mq **inp);
static worker_state *setup_background_workers(int nworkers,
dsm_segment *seg);
static void cleanup_background_workers(dsm_segment *seg, Datum arg);
static void wait_for_workers_to_become_ready(worker_state *wstate,
volatile test_shm_mq_header *hdr);
static bool check_worker_status(worker_state *wstate);
/*
* Set up a dynamic shared memory segment and zero or more background workers
* for a test run.
*/
void
test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
shm_mq_handle **output, shm_mq_handle **input)
{
dsm_segment *seg;
test_shm_mq_header *hdr;
shm_mq *outq;
shm_mq *inq;
worker_state *wstate;
/* Set up a dynamic shared memory segment. */
setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
*segp = seg;
/* Register background workers. */
wstate = setup_background_workers(nworkers, seg);
/*
 * Attach the queues.  Passing the handle of the worker at the far end of
 * each queue lets the shm_mq machinery report failure, rather than wait
 * forever, if that worker never starts or dies before attaching.
 */
*output = shm_mq_attach(outq, seg, wstate->handle[0]);
*input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);
/* Wait for workers to become ready. */
wait_for_workers_to_become_ready(wstate, hdr);
/*
* Once we reach this point, all workers are ready. We no longer need
* to kill them if we die; they'll die on their own as the message queues
* shut down.
*/
cancel_on_dsm_detach(seg, cleanup_background_workers,
PointerGetDatum(wstate));
pfree(wstate);
}
/*
* Set up a dynamic shared memory segment.
*
* We set up a small control region that contains only a test_shm_mq_header,
* plus one region per message queue. There are as many message queues as
* the number of workers, plus one.
*/
static void
setup_dynamic_shared_memory(int64 queue_size, int nworkers,
dsm_segment **segp, test_shm_mq_header **hdrp,
shm_mq **outp, shm_mq **inp)
{
shm_toc_estimator e;
int i;
uint64 segsize;
dsm_segment *seg;
shm_toc *toc;
test_shm_mq_header *hdr;
/* Ensure a valid queue size. */
if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("queue size must be at least " UINT64_FORMAT " bytes",
shm_mq_minimum_size)));
/*
* Estimate how much shared memory we need.
*
* Because the TOC machinery may choose to insert padding of oddly-sized
* requests, we must estimate each chunk separately.
*
* We need one key to register the location of the header, and we need
* nworkers + 1 keys to track the locations of the message queues.
*/
shm_toc_initialize_estimator(&e);
shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
for (i = 0; i <= nworkers; ++i)
shm_toc_estimate_chunk(&e, queue_size);
shm_toc_estimate_keys(&e, 2 + nworkers);
segsize = shm_toc_estimate(&e);
/* Create the shared memory segment and establish a table of contents. */
seg = dsm_create(segsize);
toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
segsize);
/* Set up the header region. */
hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
SpinLockInit(&hdr->mutex);
hdr->workers_total = nworkers;
hdr->workers_attached = 0;
hdr->workers_ready = 0;
shm_toc_insert(toc, 0, hdr);
/* Set up one message queue per worker, plus one. */
for (i = 0; i <= nworkers; ++i)
{
shm_mq *mq;
mq = shm_mq_create(shm_toc_allocate(toc, queue_size), queue_size);
shm_toc_insert(toc, i + 1, mq);
if (i == 0)
{
/* We send messages to the first queue. */
shm_mq_set_sender(mq, MyProc);
*outp = mq;
}
if (i == nworkers)
{
/* We receive messages from the last queue. */
shm_mq_set_receiver(mq, MyProc);
*inp = mq;
}
}
/* Return results to caller. */
*segp = seg;
*hdrp = hdr;
}
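/*
 * Worked example of the layout above: with nworkers = 3 we create four
 * queues plus the header, so the TOC holds 2 + 3 = 5 keys: key 0 for the
 * header and keys 1..4 for the queues.  attach_to_queues() in worker.c
 * relies on this numbering.
 */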
/*
* Register background workers.
*/
static worker_state *
setup_background_workers(int nworkers, dsm_segment *seg)
{
MemoryContext oldcontext;
BackgroundWorker worker;
worker_state *wstate;
int i;
/*
* We need the worker_state object and the background worker handles to
* which it points to be allocated in CurTransactionContext rather than
* ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
* hooks run.
*/
oldcontext = MemoryContextSwitchTo(CurTransactionContext);
/* Create worker state object. */
wstate = MemoryContextAlloc(TopTransactionContext,
offsetof(worker_state, handle) +
sizeof(BackgroundWorkerHandle *) * nworkers);
wstate->nworkers = 0;
/*
* Arrange to kill all the workers if we abort before all workers are
* finished hooking themselves up to the dynamic shared memory segment.
*
* If we die after all the workers have finished hooking themselves up
* to the dynamic shared memory segment, we'll mark the two queues to
* which we're directly connected as detached, and the worker(s)
* connected to those queues will exit, marking any other queues to
* which they are connected as detached. This will cause any
* as-yet-unaware workers connected to those queues to exit in their
* turn, and so on, until everybody exits.
*
* But suppose the workers which are supposed to connect to the queues
* to which we're directly attached exit due to some error before they
* actually attach the queues. The remaining workers will have no way of
* knowing this. From their perspective, they're still waiting for those
* workers to start, when in fact they've already died.  That is why we
* set bgw_notify_pid below and why wait_for_workers_to_become_ready()
* also checks whether any worker has stopped.
*/
on_dsm_detach(seg, cleanup_background_workers,
PointerGetDatum(wstate));
/* Configure a worker. */
worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
worker.bgw_start_time = BgWorkerStart_ConsistentState;
worker.bgw_restart_time = BGW_NEVER_RESTART;
worker.bgw_main = NULL; /* new worker might not have library loaded */
sprintf(worker.bgw_library_name, "test_shm_mq");
sprintf(worker.bgw_function_name, "test_shm_mq_main");
snprintf(worker.bgw_name, BGW_MAXLEN, "test_shm_mq");
worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
/* set bgw_notify_pid, so we can detect if the worker stops */
worker.bgw_notify_pid = MyProcPid;
/* Register the workers. */
for (i = 0; i < nworkers; ++i)
{
if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("could not register background process"),
errhint("You may need to increase max_worker_processes.")));
++wstate->nworkers;
}
/* All done. */
MemoryContextSwitchTo(oldcontext);
return wstate;
}
static void
cleanup_background_workers(dsm_segment *seg, Datum arg)
{
worker_state *wstate = (worker_state *) DatumGetPointer(arg);
while (wstate->nworkers > 0)
{
--wstate->nworkers;
TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
}
}
static void
wait_for_workers_to_become_ready(worker_state *wstate,
volatile test_shm_mq_header *hdr)
{
bool save_set_latch_on_sigusr1;
bool result = false;
save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
set_latch_on_sigusr1 = true;
PG_TRY();
{
for (;;)
{
int workers_ready;
/* If all the workers are ready, we have succeeded. */
SpinLockAcquire(&hdr->mutex);
workers_ready = hdr->workers_ready;
SpinLockRelease(&hdr->mutex);
if (workers_ready >= wstate->nworkers)
{
result = true;
break;
}
/* If any workers (or the postmaster) have died, we have failed. */
if (!check_worker_status(wstate))
{
result = false;
break;
}
/* Wait to be signalled. */
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
/* An interrupt may have occurred while we were waiting. */
CHECK_FOR_INTERRUPTS();
/* Reset the latch so we don't spin. */
ResetLatch(&MyProc->procLatch);
}
}
PG_CATCH();
{
set_latch_on_sigusr1 = save_set_latch_on_sigusr1;
PG_RE_THROW();
}
PG_END_TRY();
set_latch_on_sigusr1 = save_set_latch_on_sigusr1;
if (!result)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("one or more background workers failed to start")));
}
static bool
check_worker_status(worker_state *wstate)
{
int n;
/* If any workers (or the postmaster) have died, we have failed. */
for (n = 0; n < wstate->nworkers; ++n)
{
BgwHandleStatus status;
pid_t pid;
status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
return false;
}
/* Otherwise, things still look OK. */
return true;
}

contrib/test_shm_mq/sql/test_shm_mq.sql (new file)
@@ -0,0 +1,9 @@
CREATE EXTENSION test_shm_mq;
--
-- These tests don't produce any interesting output. We're checking that
-- the operations complete without crashing or hanging and that none of their
-- internal sanity tests fail.
--
SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*96)::int), '') from generate_series(1,400)), 10000, 1);
SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*96)::int), '') from generate_series(1,270000)), 200, 3);

contrib/test_shm_mq/test.c (new file)
@@ -0,0 +1,265 @@
/*--------------------------------------------------------------------------
*
* test.c
* Test harness code for shared memory message queues.
*
* Copyright (C) 2013, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/test_shm_mq/test.c
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "fmgr.h"
#include "miscadmin.h"
#include "test_shm_mq.h"
PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(test_shm_mq);
PG_FUNCTION_INFO_V1(test_shm_mq_pipelined);
void _PG_init(void);
Datum test_shm_mq(PG_FUNCTION_ARGS);
Datum test_shm_mq_pipelined(PG_FUNCTION_ARGS);
static void verify_message(uint64 origlen, char *origdata, uint64 newlen,
char *newdata);
/*
* Simple test of the shared memory message queue infrastructure.
*
* We set up a ring of message queues passing through 1 or more background
* processes and eventually looping back to ourselves. We then send a message
* through the ring a number of times indicated by the loop count. At the end,
* we check whether the final message matches the one we started with.
*/
Datum
test_shm_mq(PG_FUNCTION_ARGS)
{
int64 queue_size = PG_GETARG_INT64(0);
text *message = PG_GETARG_TEXT_PP(1);
char *message_contents = VARDATA_ANY(message);
int message_size = VARSIZE_ANY_EXHDR(message);
int32 loop_count = PG_GETARG_INT32(2);
int32 nworkers = PG_GETARG_INT32(3);
dsm_segment *seg;
shm_mq_handle *outqh;
shm_mq_handle *inqh;
shm_mq_result res;
uint64 len;
void *data;
/* A negative loopcount is nonsensical. */
if (loop_count < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("repeat count size must be a non-negative integer")));
/*
* Since this test sends data using the blocking interfaces, it cannot
* send data to itself. Therefore, a minimum of 1 worker is required.
* Of course, a negative worker count is nonsensical.
*/
if (nworkers < 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("number of workers must be a positive integer")));
/* Set up dynamic shared memory segment and background workers. */
test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
/* Send the initial message. */
res = shm_mq_send(outqh, message_size, message_contents, false);
if (res != SHM_MQ_SUCCESS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not send message")));
/*
* Receive a message and send it back out again. Do this a number of
* times equal to the loop count.
*/
for (;;)
{
/* Receive a message. */
res = shm_mq_receive(inqh, &len, &data, false);
if (res != SHM_MQ_SUCCESS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not receive message")));
/* If this is supposed to be the last iteration, stop here. */
if (--loop_count <= 0)
break;
/* Send it back out. */
res = shm_mq_send(outqh, len, data, false);
if (res != SHM_MQ_SUCCESS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not send message")));
}
/*
* Finally, check that we got back the same message from the last
* iteration that we originally sent.
*/
verify_message(message_size, message_contents, len, data);
/* Clean up. */
dsm_detach(seg);
PG_RETURN_VOID();
}
/*
* Pipelined test of the shared memory message queue infrastructure.
*
* As in the basic test, we set up a ring of message queues passing through
* 1 or more background processes and eventually looping back to ourselves.
* Then, we send N copies of the user-specified message through the ring and
* receive them all back. Since this might fill up all message queues in the
* ring and then stall, we must be prepared to begin receiving the messages
* back before we've finished sending them.
*/
Datum
test_shm_mq_pipelined(PG_FUNCTION_ARGS)
{
int64 queue_size = PG_GETARG_INT64(0);
text *message = PG_GETARG_TEXT_PP(1);
char *message_contents = VARDATA_ANY(message);
int message_size = VARSIZE_ANY_EXHDR(message);
int32 loop_count = PG_GETARG_INT32(2);
int32 nworkers = PG_GETARG_INT32(3);
bool verify = PG_GETARG_BOOL(4);
int32 send_count = 0;
int32 receive_count = 0;
dsm_segment *seg;
shm_mq_handle *outqh;
shm_mq_handle *inqh;
shm_mq_result res;
uint64 len;
void *data;
/* A negative loopcount is nonsensical. */
if (loop_count < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("repeat count size must be a non-negative integer")));
/*
* Using the nonblocking interfaces, we can even send data to ourselves,
* so the minimum number of workers for this test is zero.
*/
if (nworkers < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("number of workers must be a non-negative integer")));
/* Set up dynamic shared memory segment and background workers. */
test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
/* Main loop. */
for (;;)
{
bool wait = true;
/*
* If we haven't yet sent the message the requisite number of times,
* try again to send it now. Note that when shm_mq_send() returns
* SHM_MQ_WOULD_BLOCK, the next call to that function must pass the
* same message size and contents; that's not an issue here because
* we're sending the same message every time.
*/
if (send_count < loop_count)
{
res = shm_mq_send(outqh, message_size, message_contents, true);
if (res == SHM_MQ_SUCCESS)
{
++send_count;
wait = false;
}
else if (res == SHM_MQ_DETACHED)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not send message")));
}
/*
* If we haven't yet received the message the requisite number of
* times, try to receive it again now.
*/
if (receive_count < loop_count)
{
res = shm_mq_receive(inqh, &len, &data, true);
if (res == SHM_MQ_SUCCESS)
{
++receive_count;
/* Verifying every time is slow, so it's optional. */
if (verify)
verify_message(message_size, message_contents, len, data);
wait = false;
}
else if (res == SHM_MQ_DETACHED)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not receive message")));
}
else
{
/*
* Otherwise, we've received the message enough times. This
* shouldn't happen unless we've also sent it enough times.
*/
if (send_count != receive_count)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("message sent %d times, but received %d times",
send_count, receive_count)));
break;
}
if (wait)
{
/*
* If we made no progress, wait for one of the other processes
* to which we are connected to set our latch, indicating that
* they have read or written data and therefore there may now be
* work for us to do.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
CHECK_FOR_INTERRUPTS();
ResetLatch(&MyProc->procLatch);
}
}
/* Clean up. */
dsm_detach(seg);
PG_RETURN_VOID();
}
/*
* Verify that two messages are the same.
*/
static void
verify_message(uint64 origlen, char *origdata, uint64 newlen, char *newdata)
{
uint64 i;
if (origlen != newlen)
ereport(ERROR,
(errmsg("message corrupted"),
errdetail("The original message was " UINT64_FORMAT " bytes but the final message is " UINT64_FORMAT " bytes.",
origlen, newlen)));
for (i = 0; i < origlen; ++i)
if (origdata[i] != newdata[i])
ereport(ERROR,
(errmsg("message corrupted"),
errdetail("The new and original messages differ at byte " UINT64_FORMAT " of " UINT64_FORMAT ".", i, origlen)));
}

contrib/test_shm_mq/test_shm_mq--1.0.sql (new file)
@@ -0,0 +1,19 @@
/* contrib/test_shm_mq/test_shm_mq--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION test_shm_mq" to load this file. \quit
CREATE FUNCTION test_shm_mq(queue_size pg_catalog.int8,
message pg_catalog.text,
repeat_count pg_catalog.int4 default 1,
num_workers pg_catalog.int4 default 1)
RETURNS pg_catalog.void STRICT
AS 'MODULE_PATHNAME' LANGUAGE C;
CREATE FUNCTION test_shm_mq_pipelined(queue_size pg_catalog.int8,
message pg_catalog.text,
repeat_count pg_catalog.int4 default 1,
num_workers pg_catalog.int4 default 1,
verify pg_catalog.bool default true)
RETURNS pg_catalog.void STRICT
AS 'MODULE_PATHNAME' LANGUAGE C;

contrib/test_shm_mq/test_shm_mq.control (new file)
@@ -0,0 +1,4 @@
comment = 'Test code for shared memory message queues'
default_version = '1.0'
module_pathname = '$libdir/test_shm_mq'
relocatable = true

contrib/test_shm_mq/test_shm_mq.h (new file)
@@ -0,0 +1,45 @@
/*--------------------------------------------------------------------------
*
* test_shm_mq.h
* Definitions for shared memory message queues
*
* Copyright (C) 2013, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/test_shm_mq/test_shm_mq.h
*
* -------------------------------------------------------------------------
*/
#ifndef TEST_SHM_MQ_H
#define TEST_SHM_MQ_H
#include "storage/dsm.h"
#include "storage/shm_mq.h"
#include "storage/spin.h"
/* Identifier for shared memory segments used by this extension. */
#define PG_TEST_SHM_MQ_MAGIC 0x79fb2447
/*
* This structure is stored in the dynamic shared memory segment. We use
* it to determine whether all workers started up OK and successfully
* attached to their respective shared message queues.
*/
typedef struct
{
slock_t mutex;
int workers_total;
int workers_attached;
int workers_ready;
} test_shm_mq_header;
/* Set up dynamic shared memory and background workers for test run. */
extern void test_shm_mq_setup(int64 queue_size, int32 nworkers,
dsm_segment **seg, shm_mq_handle **output,
shm_mq_handle **input);
/* Main entrypoint for a worker. */
extern void test_shm_mq_main(Datum);
#endif
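
The counters in test_shm_mq_header drive a startup handshake that is split between setup.c above and worker.c below. The following condensed sketch is editorial, paraphrasing code from this commit rather than adding any new API: each worker bumps the counters under the spinlock and pokes the registrant's latch, while the registrant sleeps on its own latch until every worker has checked in.

/* Worker side (see test_shm_mq_main() in worker.c below). */
SpinLockAcquire(&hdr->mutex);
myworkernumber = ++hdr->workers_attached;
SpinLockRelease(&hdr->mutex);
/* ... attach to this worker's input and output queues ... */
SpinLockAcquire(&hdr->mutex);
++hdr->workers_ready;
SpinLockRelease(&hdr->mutex);
SetLatch(&registrant->procLatch);

/* Registrant side (see wait_for_workers_to_become_ready() in setup.c). */
for (;;)
{
	SpinLockAcquire(&hdr->mutex);
	workers_ready = hdr->workers_ready;
	SpinLockRelease(&hdr->mutex);
	if (workers_ready >= nworkers)
		break;					/* every worker has checked in */
	WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
	CHECK_FOR_INTERRUPTS();
	ResetLatch(&MyProc->procLatch);
}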

contrib/test_shm_mq/worker.c (new file)
@@ -0,0 +1,224 @@
/*--------------------------------------------------------------------------
*
* worker.c
* Code for sample worker making use of shared memory message queues.
* Our test worker simply reads messages from one message queue and
* writes them back out to another message queue. In a real
* application, you'd presumably want the worker to do some more
* complex calculation rather than simply returning the input,
* but it should be possible to use much of the control logic just
* as presented here.
*
* Copyright (C) 2013, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/test_shm_mq/worker.c
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/procarray.h"
#include "storage/shm_mq.h"
#include "storage/shm_toc.h"
#include "utils/resowner.h"
#include "test_shm_mq.h"
static void handle_sigterm(SIGNAL_ARGS);
static void attach_to_queues(dsm_segment *seg, shm_toc *toc,
int myworkernumber, shm_mq_handle **inqhp,
shm_mq_handle **outqhp);
static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh);
/*
* Background worker entrypoint.
*
* This is intended to demonstrate how a background worker can be used to
* facilitate a parallel computation. Most of the logic here is fairly
* boilerplate stuff, designed to attach to the shared memory segment,
* notify the user backend that we're alive, and so on. The
* application-specific bits of logic that you'd replace for your own worker
* are attach_to_queues() and copy_messages().
*/
void
test_shm_mq_main(Datum main_arg)
{
dsm_segment *seg;
shm_toc *toc;
shm_mq_handle *inqh;
shm_mq_handle *outqh;
volatile test_shm_mq_header *hdr;
int myworkernumber;
PGPROC *registrant;
/*
* Establish signal handlers.
*
* We want CHECK_FOR_INTERRUPTS() to kill off this worker process just
* as it would a normal user backend. To make that happen, we establish
* a signal handler that is a stripped-down version of die(). We don't
* have any equivalent of the backend's command-read loop, where interrupts
* can be processed immediately, so make sure ImmediateInterruptOK is
* turned off.
*/
pqsignal(SIGTERM, handle_sigterm);
ImmediateInterruptOK = false;
BackgroundWorkerUnblockSignals();
/*
* Connect to the dynamic shared memory segment.
*
* The backend that registered this worker passed us the ID of a shared
* memory segment to which we must attach for further instructions. In
* order to attach to dynamic shared memory, we need a resource owner.
* Once we've mapped the segment in our address space, attach to the table
* of contents so we can locate the various data structures we'll need
* to find within the segment.
*/
CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("unable to map dynamic shared memory segment")));
toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("bad magic number in dynamic shared memory segment")));
/*
* Acquire a worker number.
*
* By convention, the process registering this background worker should
* have stored the control structure at key 0. We look up that key to
* find it. Our worker number gives our identity: there may be just one
* worker involved in this parallel operation, or there may be many.
*/
hdr = shm_toc_lookup(toc, 0);
SpinLockAcquire(&hdr->mutex);
myworkernumber = ++hdr->workers_attached;
SpinLockRelease(&hdr->mutex);
if (myworkernumber > hdr->workers_total)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("too many message queue testing workers already")));
/*
* Attach to the appropriate message queues.
*/
attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);
/*
* Indicate that we're fully initialized and ready to begin the main
* part of the parallel operation.
*
* Once we signal that we're ready, the user backend is entitled to assume
* that our on_dsm_detach callbacks will fire before we disconnect from
* the shared memory segment and exit. Generally, that means we must have
* attached to all relevant dynamic shared memory data structures by now.
*/
SpinLockAcquire(&hdr->mutex);
++hdr->workers_ready;
SpinLockRelease(&hdr->mutex);
registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
if (registrant == NULL)
{
elog(DEBUG1, "registrant backend has exited prematurely");
proc_exit(1);
}
SetLatch(&registrant->procLatch);
/* Do the work. */
copy_messages(inqh, outqh);
/*
* We're done. Explicitly detach the shared memory segment so that we
* don't get a resource leak warning at commit time. This will fire any
* on_dsm_detach callbacks we've registered, as well. Once that's done,
* we can go ahead and exit.
*/
dsm_detach(seg);
proc_exit(1);
}
/*
* Attach to shared memory message queues.
*
* We use our worker number to determine to which queue we should attach.
* The queues are registered at keys 1..<number-of-workers + 1>.  The user
* backend writes to queue #1 and reads from queue #<number-of-workers + 1>;
* each worker reads from the queue whose number is equal to its worker
* number and writes to the next higher-numbered queue.  For example, with
* two workers, the user backend writes to queue #1, worker 1 copies queue
* #1 to queue #2, worker 2 copies queue #2 to queue #3, and the user
* backend reads queue #3.
*/
static void
attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber,
shm_mq_handle **inqhp, shm_mq_handle **outqhp)
{
shm_mq *inq;
shm_mq *outq;
inq = shm_toc_lookup(toc, myworkernumber);
shm_mq_set_receiver(inq, MyProc);
*inqhp = shm_mq_attach(inq, seg, NULL);
outq = shm_toc_lookup(toc, myworkernumber + 1);
shm_mq_set_sender(outq, MyProc);
*outqhp = shm_mq_attach(outq, seg, NULL);
}
/*
* Loop, receiving and sending messages, until the connection is broken.
*
* This is the "real work" performed by this worker process. Everything that
* happens before this is initialization of one form or another, and everything
* after this point is cleanup.
*/
static void
copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh)
{
uint64 len;
void *data;
shm_mq_result res;
for (;;)
{
/* Notice any interrupts that have occurred. */
CHECK_FOR_INTERRUPTS();
/* Receive a message. */
res = shm_mq_receive(inqh, &len, &data, false);
if (res != SHM_MQ_SUCCESS)
break;
/* Send it back out. */
res = shm_mq_send(outqh, len, data, false);
if (res != SHM_MQ_SUCCESS)
break;
}
}
/*
* When we receive a SIGTERM, we set InterruptPending and ProcDiePending just
* like a normal backend. The next CHECK_FOR_INTERRUPTS() will do the right
* thing.
*/
static void
handle_sigterm(SIGNAL_ARGS)
{
int save_errno = errno;
if (MyProc)
SetLatch(&MyProc->procLatch);
if (!proc_exit_inprogress)
{
InterruptPending = true;
ProcDiePending = true;
}
errno = save_errno;
}
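
The header comment of worker.c invites replacing the echo logic with real work. Below is a minimal editorial sketch of such a variant; only the shm_mq_receive()/shm_mq_send() calls and the loop structure come from this commit, while transform_messages() and the upper-casing step are hypothetical illustrations.

/*
 * Hypothetical variant of copy_messages(): transform each message before
 * forwarding it.  shm_mq_receive() returns a pointer into the queue's
 * buffer, so we copy the payload into private memory before modifying it.
 */
static void
transform_messages(shm_mq_handle *inqh, shm_mq_handle *outqh)
{
	uint64		len;
	void	   *data;
	shm_mq_result res;

	for (;;)
	{
		/* Notice any interrupts that have occurred. */
		CHECK_FOR_INTERRUPTS();

		/* Receive a message. */
		res = shm_mq_receive(inqh, &len, &data, false);
		if (res != SHM_MQ_SUCCESS)
			break;

		/* Example "real work": upper-case the payload, then send it on. */
		{
			char	   *buf = palloc(len);
			uint64		i;

			memcpy(buf, data, len);
			for (i = 0; i < len; ++i)
				buf[i] = pg_toupper((unsigned char) buf[i]);
			res = shm_mq_send(outqh, len, buf, false);
			pfree(buf);
		}
		if (res != SHM_MQ_SUCCESS)
			break;
	}
}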