2014-10-31 17:02:40 +01:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* pqmq.c
|
|
|
|
* Use the frontend/backend protocol for communication over a shm_mq
|
|
|
|
*
|
2022-01-08 01:04:57 +01:00
|
|
|
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
|
2014-10-31 17:02:40 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
|
|
|
* src/backend/libpq/pqmq.c
|
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "postgres.h"
|
|
|
|
|
|
|
|
#include "libpq/libpq.h"
|
|
|
|
#include "libpq/pqformat.h"
|
|
|
|
#include "libpq/pqmq.h"
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
#include "miscadmin.h"
|
2016-10-04 16:50:13 +02:00
|
|
|
#include "pgstat.h"
|
2014-10-31 17:02:40 +01:00
|
|
|
#include "tcop/tcopprot.h"
|
|
|
|
#include "utils/builtins.h"
|
|
|
|
|
|
|
|
static shm_mq_handle *pq_mq_handle;
|
|
|
|
static bool pq_mq_busy = false;
|
2020-06-14 23:22:47 +02:00
|
|
|
static pid_t pq_mq_parallel_leader_pid = 0;
|
|
|
|
static pid_t pq_mq_parallel_leader_backend_id = InvalidBackendId;
|
2014-10-31 17:02:40 +01:00
|
|
|
|
2015-10-16 15:42:33 +02:00
|
|
|
static void pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg);
|
2014-10-31 17:02:40 +01:00
|
|
|
static void mq_comm_reset(void);
|
|
|
|
static int mq_flush(void);
|
|
|
|
static int mq_flush_if_writable(void);
|
|
|
|
static bool mq_is_send_pending(void);
|
|
|
|
static int mq_putmessage(char msgtype, const char *s, size_t len);
|
|
|
|
static void mq_putmessage_noblock(char msgtype, const char *s, size_t len);
|
|
|
|
|
2018-10-16 05:45:30 +02:00
|
|
|
/*
 * Method table installed as PqCommMethods by pq_redirect_to_shm_mq(), which
 * routes libpq protocol output through a shared-memory message queue instead
 * of a client socket.  Entries must stay in the declaration order of the
 * PQcommMethods struct (positional initializer).
 */
static const PQcommMethods PqCommMqMethods = {
	mq_comm_reset,
	mq_flush,
	mq_flush_if_writable,
	mq_is_send_pending,
	mq_putmessage,
	mq_putmessage_noblock
};
|
|
|
|
|
|
|
|
/*
 * Arrange to redirect frontend/backend protocol messages to a shared-memory
 * message queue.
 *
 * 'seg' is the DSM segment containing the queue; 'mqh' is the send-side
 * handle for the queue itself.
 */
void
pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh)
{
	/* Swap in the shm_mq-backed implementations of the pq functions. */
	PqCommMethods = &PqCommMqMethods;
	pq_mq_handle = mqh;
	whereToSendOutput = DestRemote;
	FrontendProtocol = PG_PROTOCOL_LATEST;
	/* Undo the redirection automatically when the DSM segment goes away. */
	on_dsm_detach(seg, pq_cleanup_redirect_to_shm_mq, (Datum) 0);
}
|
|
|
|
|
|
|
|
/*
 * When the DSM that contains our shm_mq goes away, we need to stop sending
 * messages to it.
 *
 * Registered via on_dsm_detach() in pq_redirect_to_shm_mq(); 'seg' and 'arg'
 * are required by the callback signature but unused here.
 */
static void
pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg)
{
	pq_mq_handle = NULL;
	/* Discard any further protocol output rather than crash on a dead queue. */
	whereToSendOutput = DestNone;
}
|
|
|
|
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
/*
 * Arrange to SendProcSignal() to the parallel leader each time we transmit
 * message data via the shm_mq.
 *
 * Passing pid 0 effectively disables the notification (see the check in
 * mq_putmessage()).
 */
void
pq_set_parallel_leader(pid_t pid, BackendId backend_id)
{
	/* Only meaningful once output has been redirected to a shm_mq. */
	Assert(PqCommMethods == &PqCommMqMethods);
	pq_mq_parallel_leader_pid = pid;
	pq_mq_parallel_leader_backend_id = backend_id;
}
|
|
|
|
|
2014-10-31 17:02:40 +01:00
|
|
|
/*
 * Reset communication state.  The shm_mq transport keeps no per-connection
 * protocol state at this level, so this is a no-op.
 */
static void
mq_comm_reset(void)
{
	/* Nothing to do. */
}
|
|
|
|
|
|
|
|
/*
 * Flush buffered output.  Messages are handed to the shm_mq immediately in
 * mq_putmessage(), so there is never buffered output to flush; always
 * succeeds.
 */
static int
mq_flush(void)
{
	/* Nothing to do. */
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Flush buffered output without blocking.  As with mq_flush(), there is no
 * output buffered at this level, so always succeeds.
 */
static int
mq_flush_if_writable(void)
{
	/* Nothing to do. */
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Report whether any outgoing data is waiting to be sent.  Messages are
 * transmitted synchronously by mq_putmessage(), so nothing is ever pending
 * at this level.  Return the bool literal rather than integer 0 to match
 * the function's return type.
 */
static bool
mq_is_send_pending(void)
{
	/* There's never anything pending. */
	return false;
}
|
|
|
|
|
|
|
|
/*
 * Transmit a libpq protocol message to the shared memory message queue
 * selected via pq_mq_handle.  We don't include a length word, because the
 * receiver will know the length of the message from shm_mq_receive().
 *
 * Returns 0 on success (or if the queue is already gone), EOF if the
 * message could not be sent.
 */
static int
mq_putmessage(char msgtype, const char *s, size_t len)
{
	shm_mq_iovec iov[2];
	shm_mq_result result;

	/*
	 * If we're sending a message, and we have to wait because the queue is
	 * full, and then we get interrupted, and that interrupt results in trying
	 * to send another message, we respond by detaching the queue.  There's no
	 * way to return to the original context, but even if there were, just
	 * queueing the message would amount to indefinitely postponing the
	 * response to the interrupt.  So we do this instead.
	 */
	if (pq_mq_busy)
	{
		if (pq_mq_handle != NULL)
			shm_mq_detach(pq_mq_handle);
		pq_mq_handle = NULL;
		return EOF;
	}

	/*
	 * If the message queue is already gone, just ignore the message. This
	 * doesn't necessarily indicate a problem; for example, DEBUG messages can
	 * be generated late in the shutdown sequence, after all DSMs have already
	 * been detached.
	 */
	if (pq_mq_handle == NULL)
		return 0;

	pq_mq_busy = true;

	/* iov[0] carries the one-byte message type; iov[1] the payload. */
	iov[0].data = &msgtype;
	iov[0].len = 1;
	iov[1].data = s;
	iov[1].len = len;

	Assert(pq_mq_handle != NULL);

	for (;;)
	{
		/*
		 * Immediately notify the receiver by passing force_flush as true so
		 * that the shared memory value is updated before we send the parallel
		 * message signal right after this.
		 */
		result = shm_mq_sendv(pq_mq_handle, iov, 2, true, true);

		/* Wake the parallel leader, if one was registered, to drain the queue. */
		if (pq_mq_parallel_leader_pid != 0)
			SendProcSignal(pq_mq_parallel_leader_pid,
						   PROCSIG_PARALLEL_MESSAGE,
						   pq_mq_parallel_leader_backend_id);

		if (result != SHM_MQ_WOULD_BLOCK)
			break;

		/* Queue was full; wait for the receiver to make room, then retry. */
		(void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
						 WAIT_EVENT_MQ_PUT_MESSAGE);
		ResetLatch(MyLatch);
		CHECK_FOR_INTERRUPTS();
	}

	pq_mq_busy = false;

	Assert(result == SHM_MQ_SUCCESS || result == SHM_MQ_DETACHED);
	if (result != SHM_MQ_SUCCESS)
		return EOF;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Non-blocking message transmission is not supported over a shm_mq;
 * always raises an error.
 */
static void
mq_putmessage_noblock(char msgtype, const char *s, size_t len)
{
	/*
	 * While the shm_mq machinery does support sending a message in
	 * non-blocking mode, there's currently no way to begin sending a
	 * message that doesn't also commit us to completing the transmission.
	 * This could be improved in the future, but for now we don't need it.
	 */
	elog(ERROR, "not currently supported");
}
|
|
|
|
|
|
|
|
/*
 * Parse an ErrorResponse or NoticeResponse payload and populate an ErrorData
 * structure with the results.
 *
 * The payload is a sequence of (one-byte field code, NUL-terminated string)
 * pairs terminated by a zero byte.  String fields are pstrdup'd into the
 * current memory context; edata->assoc_context is set accordingly.
 */
void
pq_parse_errornotice(StringInfo msg, ErrorData *edata)
{
	/* Initialize edata with reasonable defaults. */
	MemSet(edata, 0, sizeof(ErrorData));
	edata->elevel = ERROR;
	edata->assoc_context = CurrentMemoryContext;

	/* Loop over fields and extract each one. */
	for (;;)
	{
		char		code = pq_getmsgbyte(msg);
		const char *value;

		/* A zero byte terminates the field list. */
		if (code == '\0')
		{
			pq_getmsgend(msg);
			break;
		}
		value = pq_getmsgrawstring(msg);

		switch (code)
		{
			case PG_DIAG_SEVERITY:
				/* ignore, trusting we'll get a nonlocalized version */
				break;
			case PG_DIAG_SEVERITY_NONLOCALIZED:
				if (strcmp(value, "DEBUG") == 0)
				{
					/*
					 * We can't reconstruct the exact DEBUG level, but
					 * presumably it was >= client_min_messages, so select
					 * DEBUG1 to ensure we'll pass it on to the client.
					 */
					edata->elevel = DEBUG1;
				}
				else if (strcmp(value, "LOG") == 0)
				{
					/*
					 * It can't be LOG_SERVER_ONLY, or the worker wouldn't
					 * have sent it to us; so LOG is the correct value.
					 */
					edata->elevel = LOG;
				}
				else if (strcmp(value, "INFO") == 0)
					edata->elevel = INFO;
				else if (strcmp(value, "NOTICE") == 0)
					edata->elevel = NOTICE;
				else if (strcmp(value, "WARNING") == 0)
					edata->elevel = WARNING;
				else if (strcmp(value, "ERROR") == 0)
					edata->elevel = ERROR;
				else if (strcmp(value, "FATAL") == 0)
					edata->elevel = FATAL;
				else if (strcmp(value, "PANIC") == 0)
					edata->elevel = PANIC;
				else
					elog(ERROR, "unrecognized error severity: \"%s\"", value);
				break;
			case PG_DIAG_SQLSTATE:
				/* SQLSTATE is always exactly five characters. */
				if (strlen(value) != 5)
					elog(ERROR, "invalid SQLSTATE: \"%s\"", value);
				edata->sqlerrcode = MAKE_SQLSTATE(value[0], value[1], value[2],
												  value[3], value[4]);
				break;
			case PG_DIAG_MESSAGE_PRIMARY:
				edata->message = pstrdup(value);
				break;
			case PG_DIAG_MESSAGE_DETAIL:
				edata->detail = pstrdup(value);
				break;
			case PG_DIAG_MESSAGE_HINT:
				edata->hint = pstrdup(value);
				break;
			case PG_DIAG_STATEMENT_POSITION:
				edata->cursorpos = pg_strtoint32(value);
				break;
			case PG_DIAG_INTERNAL_POSITION:
				edata->internalpos = pg_strtoint32(value);
				break;
			case PG_DIAG_INTERNAL_QUERY:
				edata->internalquery = pstrdup(value);
				break;
			case PG_DIAG_CONTEXT:
				edata->context = pstrdup(value);
				break;
			case PG_DIAG_SCHEMA_NAME:
				edata->schema_name = pstrdup(value);
				break;
			case PG_DIAG_TABLE_NAME:
				edata->table_name = pstrdup(value);
				break;
			case PG_DIAG_COLUMN_NAME:
				edata->column_name = pstrdup(value);
				break;
			case PG_DIAG_DATATYPE_NAME:
				edata->datatype_name = pstrdup(value);
				break;
			case PG_DIAG_CONSTRAINT_NAME:
				edata->constraint_name = pstrdup(value);
				break;
			case PG_DIAG_SOURCE_FILE:
				edata->filename = pstrdup(value);
				break;
			case PG_DIAG_SOURCE_LINE:
				edata->lineno = pg_strtoint32(value);
				break;
			case PG_DIAG_SOURCE_FUNCTION:
				edata->funcname = pstrdup(value);
				break;
			default:
				elog(ERROR, "unrecognized error field code: %d", (int) code);
				break;
		}
	}
}
|