/*-------------------------------------------------------------------------
*
* latch.c
* Routines for inter-process latches
*
* The Unix implementation uses the so-called self-pipe trick to overcome the
* race condition involved with poll() (or epoll_wait() on linux) and setting
* a global flag in the signal handler. When a latch is set and the current
* process is waiting for it, the signal handler wakes up the poll() in
* WaitLatch by writing a byte to a pipe. A signal by itself doesn't interrupt
* poll() on all platforms, and even on platforms where it does, a signal that
* arrives just before the poll() call does not prevent poll() from entering
* sleep. An incoming byte on a pipe however reliably interrupts the sleep,
* and causes poll() to return immediately even if the signal arrives before
* poll() begins.
*
* When SetLatch is called from the same process that owns the latch,
* SetLatch writes the byte directly to the pipe. If it's owned by another
* process, SIGUSR1 is sent and the signal handler in the waiting process
* writes the byte to the pipe on behalf of the signaling process.
*
* The Windows implementation uses Windows events that are inherited by all
* postmaster child processes. There's no need for the self-pipe trick there.
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/storage/ipc/latch.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "portability/instr_time.h"
#include "postmaster/postmaster.h"
#include "storage/latch.h"
#include "storage/pmsignal.h"
#include "storage/shmem.h"
/*
* Select the fd readiness primitive to use. Normally the "most modern"
* primitive supported by the OS will be used, but for testing it can be
* useful to specify the primitive manually. If desired, just add a define
* somewhere before this block, as in the example below.
*/
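/*
 * For example, adding the following line at this point would force the
 * poll(2) based implementation (illustrative only; no such define is
 * present by default):
 *
 *		#define WAIT_USE_POLL
 */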
#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
defined(WAIT_USE_WIN32)
/* don't overwrite manual choice */
#elif defined(HAVE_SYS_EPOLL_H)
#define WAIT_USE_EPOLL
#elif defined(HAVE_POLL)
#define WAIT_USE_POLL
#elif WIN32
#define WAIT_USE_WIN32
#else
#error "no wait set implementation available"
#endif
/* typedef in latch.h */
struct WaitEventSet
{
int nevents; /* number of registered events */
int nevents_space; /* maximum number of events in this set */
/*
* Array, of nevents_space length, storing the definition of events this
* set is waiting for.
*/
WaitEvent *events;
/*
* If WL_LATCH_SET is specified in any wait event, latch is a pointer to
* said latch, and latch_pos the offset in the ->events array. This is
* useful because we check the state of the latch before performing
* syscalls related to waiting.
*/
Latch *latch;
int latch_pos;
#if defined(WAIT_USE_EPOLL)
int epoll_fd;
/* epoll_wait returns events in a user-provided array; allocate it once */
struct epoll_event *epoll_ret_events;
#elif defined(WAIT_USE_POLL)
/* poll expects the pollfd array to be passed on every poll() call; prepare it once */
struct pollfd *pollfds;
#elif defined(WAIT_USE_WIN32)
/*
* Array of windows events. The first element always contains
* pgwin32_signal_event, so the remaining elements are offset by one (i.e.
* event->pos + 1).
*/
HANDLE *handles;
#endif
};
#ifndef WIN32
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;
/* Read and write ends of the self-pipe */
static int selfpipe_readfd = -1;
static int selfpipe_writefd = -1;
/* Private function prototypes */
static void sendSelfPipeByte(void);
static void drainSelfPipe(void);
#endif /* WIN32 */
#if defined(WAIT_USE_EPOLL)
static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
#elif defined(WAIT_USE_POLL)
static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
#elif defined(WAIT_USE_WIN32)
static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
#endif
static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
WaitEvent *occurred_events, int nevents);
/*
* Initialize the process-local latch infrastructure.
*
* This must be called once during startup of any process that can wait on
* latches, before it issues any InitLatch() or OwnLatch() calls.
*/
void
InitializeLatchSupport(void)
{
#ifndef WIN32
int pipefd[2];
Assert(selfpipe_readfd == -1);
/*
* Set up the self-pipe that allows a signal handler to wake up the
* poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
* that SetLatch won't block if the event has already been set many times,
* filling the kernel buffer. Make the read-end non-blocking too, so that
* we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
*/
if (pipe(pipefd) < 0)
elog(FATAL, "pipe() failed: %m");
if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
elog(FATAL, "fcntl() failed on read-end of self-pipe: %m");
if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
elog(FATAL, "fcntl() failed on write-end of self-pipe: %m");
selfpipe_readfd = pipefd[0];
selfpipe_writefd = pipefd[1];
#else
/* currently, nothing to do here for Windows */
#endif
}
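#ifdef LATCH_EXAMPLES			/* hypothetical guard; never defined */
/*
 * Minimal sketch of the drain logic the comment above relies on: since the
 * read end is non-blocking, the pipe can be emptied by reading until the
 * kernel reports EAGAIN/EWOULDBLOCK.  The authoritative version is
 * drainSelfPipe(), defined later in this file; this sketch only illustrates
 * the idea.
 */
static void
example_drain_self_pipe(void)
{
	char		buf[16];

	for (;;)
	{
		int			rc = read(selfpipe_readfd, buf, sizeof(buf));

		if (rc < 0 && errno == EINTR)
			continue;			/* interrupted by a signal; retry */
		if (rc <= 0)
			break;				/* empty (EAGAIN/EWOULDBLOCK), EOF, or error */
	}
}
#endif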
/*
* Initialize a backend-local latch.
*/
void
InitLatch(volatile Latch *latch)
{
latch->is_set = false;
latch->owner_pid = MyProcPid;
latch->is_shared = false;
#ifndef WIN32
/* Assert InitializeLatchSupport has been called in this process */
Assert(selfpipe_readfd >= 0);
#else
latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
if (latch->event == NULL)
elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
#endif /* WIN32 */
}
/*
* Initialize a shared latch that can be set from other processes. The latch
* is initially owned by no-one; use OwnLatch to associate it with the
* current process.
*
* InitSharedLatch needs to be called in postmaster before forking child
* processes, usually right after allocating the shared memory block
* containing the latch with ShmemInitStruct. (The Unix implementation
* doesn't actually require that, but the Windows one does.) Because of
* this restriction, we have no concurrency issues to worry about here.
*/
void
InitSharedLatch(volatile Latch *latch)
{
#ifdef WIN32
SECURITY_ATTRIBUTES sa;
/*
* Set up security attributes to specify that the events are inherited.
*/
ZeroMemory(&sa, sizeof(sa));
sa.nLength = sizeof(sa);
sa.bInheritHandle = TRUE;
latch->event = CreateEvent(&sa, TRUE, FALSE, NULL);
if (latch->event == NULL)
elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
#endif
latch->is_set = false;
latch->owner_pid = 0;
latch->is_shared = true;
}
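#ifdef LATCH_EXAMPLES			/* hypothetical guard; never defined */
/*
 * Sketch of the initialization protocol described above.  The variable and
 * function names are assumptions for illustration: the postmaster allocates
 * and initializes the latch in shared memory before forking, and a child
 * process later takes ownership of it.
 */
static Latch *example_shared_latch;

static void
example_postmaster_init(void)
{
	bool		found;

	example_shared_latch = (Latch *)
		ShmemInitStruct("example latch", sizeof(Latch), &found);
	if (!found)
		InitSharedLatch(example_shared_latch);
}

static void
example_child_main(void)
{
	InitializeLatchSupport();	/* once per process, before OwnLatch */
	OwnLatch(example_shared_latch);
}
#endif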
/*
* Associate a shared latch with the current process, allowing it to
* wait on the latch.
*
* Although there is a sanity check for latch-already-owned, we don't do
* any sort of locking here, meaning that we could fail to detect the error
* if two processes try to own the same latch at about the same time. If
* there is any risk of that, caller must provide an interlock to prevent it.
*
* In any process that calls OwnLatch(), make sure that
* latch_sigusr1_handler() is called from the SIGUSR1 signal handler,
* as shared latches use SIGUSR1 for inter-process communication.
*/
void
OwnLatch(volatile Latch *latch)
{
/* Sanity checks */
Assert(latch->is_shared);
#ifndef WIN32
/* Assert InitializeLatchSupport has been called in this process */
Assert(selfpipe_readfd >= 0);
#endif
if (latch->owner_pid != 0)
elog(ERROR, "latch already owned");
latch->owner_pid = MyProcPid;
}
/*
* Disown a shared latch currently owned by the current process.
*/
void
DisownLatch(volatile Latch *latch)
{
Assert(latch->is_shared);
Assert(latch->owner_pid == MyProcPid);
latch->owner_pid = 0;
}
/*
* Wait for a given latch to be set, or for postmaster death, or until timeout
* is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
* to wait for. If the latch is already set (and WL_LATCH_SET is given), the
* function returns immediately.
*
* The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
* is given. Although it is declared as "long", we don't actually support
* timeouts longer than INT_MAX milliseconds. Note that some extra overhead
* is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
*
* The latch must be owned by the current process, ie. it must be a
* backend-local latch initialized with InitLatch, or a shared latch
* associated with the current process by calling OwnLatch.
*
* Returns bit mask indicating which condition(s) caused the wake-up. Note
* that if multiple wake-up conditions are true, there is no guarantee that
* we return all of them in one call, but we will return at least one.
*/
int
WaitLatch(volatile Latch *latch, int wakeEvents, long timeout,
uint32 wait_event_info)
{
return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout,
wait_event_info);
}
/*
* Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
* conditions.
*
* When waiting on a socket, EOF and error conditions are reported by
* returning the socket as readable/writable or both, depending on
* WL_SOCKET_READABLE/WL_SOCKET_WRITEABLE being specified.
*
* NB: These days this is just a wrapper around the WaitEventSet API. When
* using a latch very frequently, consider creating a longer living
* WaitEventSet instead; that's more efficient.
*/
int
WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
long timeout, uint32 wait_event_info)
{
int ret = 0;
int rc;
WaitEvent event;
WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
if (wakeEvents & WL_TIMEOUT)
Assert(timeout >= 0);
else
timeout = -1;
if (wakeEvents & WL_LATCH_SET)
AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET,
(Latch *) latch, NULL);
if (wakeEvents & WL_POSTMASTER_DEATH)
AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET,
NULL, NULL);
if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
{
int ev;
ev = wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);
AddWaitEventToSet(set, ev, sock, NULL, NULL);
}
rc = WaitEventSetWait(set, timeout, &event, 1, wait_event_info);
if (rc == 0)
ret |= WL_TIMEOUT;
else
{
ret |= event.events & (WL_LATCH_SET |
WL_POSTMASTER_DEATH |
WL_SOCKET_READABLE |
WL_SOCKET_WRITEABLE);
}
FreeWaitEventSet(set);
return ret;
}
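#ifdef LATCH_EXAMPLES			/* hypothetical guard; never defined */
/*
 * Sketch of the longer-lived pattern recommended above: build the set once,
 * then wait repeatedly without re-registering events.  The event handling
 * shown is an assumption for illustration; a real caller would also pass a
 * suitable wait_event_info value instead of 0.
 */
static void
example_event_loop(Latch *latch, pgsocket sock)
{
	WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
	WaitEvent	event;

	AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, latch, NULL);
	AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, NULL, NULL);
	AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);

	for (;;)
	{
		/* wait without timeout until at least one event has occurred */
		(void) WaitEventSetWait(set, -1, &event, 1, 0);

		if (event.events & WL_POSTMASTER_DEATH)
			break;				/* a real caller would normally exit here */
		if (event.events & WL_LATCH_SET)
		{
			ResetLatch(latch);
			/* ... react to whatever the latch signals ... */
		}
		if (event.events & WL_SOCKET_READABLE)
		{
			/* ... read from sock ... */
		}
	}
	FreeWaitEventSet(set);
}
#endif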
/*
* Sets a latch and wakes up anyone waiting on it.
*
* This is cheap if the latch is already set, otherwise not so much.
*
* NB: when calling this in a signal handler, be sure to save and restore
* errno around it. (That's standard practice in most signal handlers, of
* course, but we used to omit it in handlers that only set a flag.)
*
* NB: this function is called from critical sections and signal handlers so
* throwing an error is not a good idea.
*/
void
SetLatch(volatile Latch *latch)
{
#ifndef WIN32
pid_t owner_pid;
#else
HANDLE handle;
#endif
/*
* The memory barrier has to be placed here to ensure that any flag
* variables possibly changed by this process have been flushed to main
* memory, before we check/set is_set.
*/
pg_memory_barrier();
/* Quick exit if already set */
if (latch->is_set)
return;
latch->is_set = true;
#ifndef WIN32
/*
* See if anyone's waiting for the latch. It can be the current process if
* we're in a signal handler. We use the self-pipe to wake up the
* poll()/epoll_wait() in that case. If it's another process, send a
* signal.
*
* Fetch owner_pid only once, in case the latch is concurrently getting
* owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
* guaranteed to be true! In practice, the effective range of pid_t fits
* in a 32 bit integer, and so should be atomic. In the worst case, we
* might end up signaling the wrong process. Even then, you're very
* unlucky if a process with that bogus pid exists and belongs to
* Postgres; and PG database processes should handle excess SIGUSR1
* interrupts without a problem anyhow.
*
* Another sort of race condition that's possible here is for a new
* process to own the latch immediately after we look, so we don't signal
* it. This is okay so long as all callers of ResetLatch/WaitLatch follow
* the standard coding convention of waiting at the bottom of their loops,
* not the top, so that they'll correctly process latch-setting events
* that happen before they enter the loop.
*/
owner_pid = latch->owner_pid;
if (owner_pid == 0)
return;
else if (owner_pid == MyProcPid)
{
if (waiting)
sendSelfPipeByte();
}
else
kill(owner_pid, SIGUSR1);
#else
/*
* See if anyone's waiting for the latch. It can be the current process if
* we're in a signal handler.
*
* Use a local variable here just in case somebody changes the event field
* concurrently (which really should not happen).
*/
handle = latch->event;
if (handle)
{
SetEvent(handle);
/*
* Note that we silently ignore any errors. We might be in a signal
* handler or other critical path where it's not safe to call elog().
*/
}
#endif
}
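#ifdef LATCH_EXAMPLES			/* hypothetical guard; never defined */
/*
 * Sketch of the save/restore-errno convention from the note above, for a
 * hypothetical SIGUSR1 handler that sets this process's own latch.
 */
static void
example_sigusr1_handler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	SetLatch(MyLatch);
	errno = save_errno;
}
#endif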
/*
* Clear the latch. Calling WaitLatch after this will sleep, unless
* the latch is set again before the WaitLatch call.
*/
void
ResetLatch(volatile Latch *latch)
{
/* Only the owner should reset the latch */
Assert(latch->owner_pid == MyProcPid);
latch->is_set = false;
/*
* Ensure that the write to is_set gets flushed to main memory before we
* examine any flag variables. Otherwise a concurrent SetLatch might
* falsely conclude that it needn't signal us, even though we have missed
* seeing some flag updates that SetLatch was supposed to inform us of.
*/
pg_memory_barrier();
}
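#ifdef LATCH_EXAMPLES			/* hypothetical guard; never defined */
/*
 * Sketch of the standard coding convention referred to in SetLatch's
 * comments: reset first, then check for work, and wait at the bottom of the
 * loop, so that a SetLatch occurring at any point is never lost.  The work
 * flag is an assumption for illustration.
 */
static void
example_latch_loop(volatile sig_atomic_t *work_pending)
{
	for (;;)
	{
		ResetLatch(MyLatch);

		if (*work_pending)
		{
			/* ... perform the work and clear the flag ... */
		}

		/* timeout argument is ignored without WL_TIMEOUT */
		(void) WaitLatch(MyLatch, WL_LATCH_SET, 0, 0);
	}
}
#endif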
/*
* Create a WaitEventSet with space for nevents different events to wait for.
*
* These events can then be efficiently waited upon together, using
* WaitEventSetWait().
*/
WaitEventSet *
CreateWaitEventSet(MemoryContext context, int nevents)
{
WaitEventSet *set;
char *data;
Size sz = 0;
/*
* Use MAXALIGN size/alignment to guarantee that later uses of memory are
* aligned correctly. E.g. epoll_event might need 8 byte alignment on some
* platforms, but earlier allocations like WaitEventSet and WaitEvent
 * might not be sized to guarantee that when purely using sizeof().
*/
sz += MAXALIGN(sizeof(WaitEventSet));
sz += MAXALIGN(sizeof(WaitEvent) * nevents);
#if defined(WAIT_USE_EPOLL)
sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_POLL)
sz += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
/* need space for the pgwin32_signal_event */
sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
#endif
data = (char *) MemoryContextAllocZero(context, sz);
set = (WaitEventSet *) data;
data += MAXALIGN(sizeof(WaitEventSet));
set->events = (WaitEvent *) data;
data += MAXALIGN(sizeof(WaitEvent) * nevents);
#if defined(WAIT_USE_EPOLL)
set->epoll_ret_events = (struct epoll_event *) data;
data += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_POLL)
set->pollfds = (struct pollfd *) data;
data += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
	set->handles = (HANDLE *) data;
data += MAXALIGN(sizeof(HANDLE) * nevents);
#endif
set->latch = NULL;
set->nevents_space = nevents;
#if defined(WAIT_USE_EPOLL)
set->epoll_fd = epoll_create(nevents);
if (set->epoll_fd < 0)
elog(ERROR, "epoll_create failed: %m");
#elif defined(WAIT_USE_WIN32)
/*
* To handle signals while waiting, we need to add a win32 specific event.
* We accounted for the additional event at the top of this routine. See
* port/win32/signal.c for more details.
*
* Note: pgwin32_signal_event should be first to ensure that it will be
* reported when multiple events are set. We want to guarantee that
* pending signals are serviced.
*/
set->handles[0] = pgwin32_signal_event;
StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
#endif
return set;
}
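/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original source): a long-lived set is sized up front for everything the
 * caller intends to add, and released with FreeWaitEventSet() when done.
 *
 *		WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
 *
 *		... AddWaitEventToSet() and WaitEventSetWait() calls ...
 *
 *		FreeWaitEventSet(set);
 */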
/*
* Free a previously created WaitEventSet.
*/
void
FreeWaitEventSet(WaitEventSet *set)
{
#if defined(WAIT_USE_EPOLL)
close(set->epoll_fd);
#elif defined(WAIT_USE_WIN32)
WaitEvent *cur_event;
for (cur_event = set->events;
cur_event < (set->events + set->nevents);
cur_event++)
{
if (cur_event->events & WL_LATCH_SET)
{
/* uses the latch's HANDLE */
}
else if (cur_event->events & WL_POSTMASTER_DEATH)
{
/* uses PostmasterHandle */
}
else
{
/* Clean up the event object we created for the socket */
WSAEventSelect(cur_event->fd, NULL, 0);
WSACloseEvent(set->handles[cur_event->pos + 1]);
}
}
#endif
pfree(set);
}
/* ---
* Add an event to the set. Possible events are:
* - WL_LATCH_SET: Wait for the latch to be set
* - WL_POSTMASTER_DEATH: Wait for postmaster to die
* - WL_SOCKET_READABLE: Wait for socket to become readable
* can be combined in one event with WL_SOCKET_WRITEABLE
* - WL_SOCKET_WRITEABLE: Wait for socket to become writeable
* can be combined with WL_SOCKET_READABLE
*
* Returns the offset in WaitEventSet->events (starting from 0), which can be
* used to modify previously added wait events using ModifyWaitEvent().
*
* In the WL_LATCH_SET case the latch must be owned by the current process,
* i.e. it must be a backend-local latch initialized with InitLatch, or a
* shared latch associated with the current process by calling OwnLatch.
*
* In the WL_SOCKET_READABLE/WRITEABLE case, EOF and error conditions are
* reported by returning the socket as readable/writable or both, depending on
* WL_SOCKET_READABLE/WRITEABLE being specified.
*
* The user_data pointer specified here will be set for the events returned
 * by WaitEventSetWait(), making it easy to associate additional data with
* events.
*/
int
AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
void *user_data)
{
WaitEvent *event;
/* not enough space */
Assert(set->nevents < set->nevents_space);
if (latch)
{
if (latch->owner_pid != MyProcPid)
elog(ERROR, "cannot wait on a latch owned by another process");
if (set->latch)
elog(ERROR, "cannot wait on more than one latch");
if ((events & WL_LATCH_SET) != WL_LATCH_SET)
elog(ERROR, "latch events only support being set");
}
else
{
if (events & WL_LATCH_SET)
elog(ERROR, "cannot wait on latch without a specified latch");
}
/* waiting for socket readiness without a socket indicates a bug */
if (fd == PGINVALID_SOCKET &&
(events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)))
elog(ERROR, "cannot wait on socket event without a socket");
event = &set->events[set->nevents];
event->pos = set->nevents++;
event->fd = fd;
event->events = events;
event->user_data = user_data;
#ifdef WIN32
event->reset = false;
#endif
if (events == WL_LATCH_SET)
{
set->latch = latch;
set->latch_pos = event->pos;
#ifndef WIN32
event->fd = selfpipe_readfd;
#endif
}
else if (events == WL_POSTMASTER_DEATH)
{
#ifndef WIN32
event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
#endif
}
/* perform wait primitive specific initialization, if needed */
#if defined(WAIT_USE_EPOLL)
WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
#elif defined(WAIT_USE_POLL)
WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
WaitEventAdjustWin32(set, event);
#endif
return event->pos;
}
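/*
 * Illustrative sketch (editorial addition): filling the three slots sized
 * in the sketch above. MyLatch is the backend's own process latch;
 * "client_sock" stands in for some hypothetical connected socket. The
 * returned positions are the 'pos' values that ModifyWaitEvent() expects.
 *
 *		int		latch_pos = AddWaitEventToSet(set, WL_LATCH_SET,
 *											  PGINVALID_SOCKET, MyLatch, NULL);
 *		int		death_pos = AddWaitEventToSet(set, WL_POSTMASTER_DEATH,
 *											  PGINVALID_SOCKET, NULL, NULL);
 *		int		sock_pos = AddWaitEventToSet(set, WL_SOCKET_READABLE,
 *											 client_sock, NULL, NULL);
 */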
/*
* Change the event mask and, in the WL_LATCH_SET case, the latch associated
* with the WaitEvent.
*
* 'pos' is the id returned by AddWaitEventToSet.
*/
void
ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
WaitEvent *event;
Assert(pos < set->nevents);
event = &set->events[pos];
/*
* If neither the event mask nor the associated latch changes, return
* early. That's an important optimization for some sockets, where
* ModifyWaitEvent is frequently used to switch from waiting for reads to
* waiting on writes.
*/
if (events == event->events &&
(!(event->events & WL_LATCH_SET) || set->latch == latch))
return;
if (event->events & WL_LATCH_SET &&
events != event->events)
{
		/* we could allow disabling latch events for a while */
elog(ERROR, "cannot modify latch event");
}
if (event->events & WL_POSTMASTER_DEATH)
{
elog(ERROR, "cannot modify postmaster death event");
}
/* FIXME: validate event mask */
event->events = events;
if (events == WL_LATCH_SET)
{
set->latch = latch;
}
#if defined(WAIT_USE_EPOLL)
WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
#elif defined(WAIT_USE_POLL)
WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
WaitEventAdjustWin32(set, event);
#endif
}
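/*
 * Illustrative sketch (editorial addition): flipping a socket event
 * between read- and write-waiting, using the "sock_pos" position from the
 * sketch above. Repeating the current mask takes the early-return path
 * and performs no kernel call.
 *
 *		ModifyWaitEvent(set, sock_pos, WL_SOCKET_WRITEABLE, NULL);
 *		... send() until the output buffer is drained ...
 *		ModifyWaitEvent(set, sock_pos, WL_SOCKET_READABLE, NULL);
 */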
#if defined(WAIT_USE_EPOLL)
/*
 * action can be one of EPOLL_CTL_ADD, EPOLL_CTL_MOD or EPOLL_CTL_DEL
*/
static void
WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
{
struct epoll_event epoll_ev;
int rc;
/* pointer to our event, returned by epoll_wait */
epoll_ev.data.ptr = event;
/* always wait for errors */
epoll_ev.events = EPOLLERR | EPOLLHUP;
	/* prepare epoll_event entry once */
if (event->events == WL_LATCH_SET)
{
Assert(set->latch != NULL);
epoll_ev.events |= EPOLLIN;
}
else if (event->events == WL_POSTMASTER_DEATH)
{
epoll_ev.events |= EPOLLIN;
}
else
{
Assert(event->fd != PGINVALID_SOCKET);
Assert(event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE));
if (event->events & WL_SOCKET_READABLE)
epoll_ev.events |= EPOLLIN;
if (event->events & WL_SOCKET_WRITEABLE)
epoll_ev.events |= EPOLLOUT;
}
/*
* Even though unused, we also pass epoll_ev as the data argument if
* EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
* requiring that, and actually it makes the code simpler...
*/
rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
if (rc < 0)
ereport(ERROR,
(errcode_for_socket_access(),
errmsg("epoll_ctl() failed: %m")));
}
#endif
#if defined(WAIT_USE_POLL)
static void
WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
{
struct pollfd *pollfd = &set->pollfds[event->pos];
pollfd->revents = 0;
pollfd->fd = event->fd;
/* prepare pollfd entry once */
if (event->events == WL_LATCH_SET)
{
Assert(set->latch != NULL);
pollfd->events = POLLIN;
}
else if (event->events == WL_POSTMASTER_DEATH)
{
pollfd->events = POLLIN;
}
else
{
Assert(event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE));
pollfd->events = 0;
if (event->events & WL_SOCKET_READABLE)
pollfd->events |= POLLIN;
if (event->events & WL_SOCKET_WRITEABLE)
pollfd->events |= POLLOUT;
}
Assert(event->fd != PGINVALID_SOCKET);
}
#endif
#if defined(WAIT_USE_WIN32)
static void
WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
{
HANDLE *handle = &set->handles[event->pos + 1];
if (event->events == WL_LATCH_SET)
{
Assert(set->latch != NULL);
*handle = set->latch->event;
}
else if (event->events == WL_POSTMASTER_DEATH)
{
*handle = PostmasterHandle;
}
else
{
int flags = FD_CLOSE; /* always check for errors/EOF */
if (event->events & WL_SOCKET_READABLE)
flags |= FD_READ;
if (event->events & WL_SOCKET_WRITEABLE)
flags |= FD_WRITE;
if (*handle == WSA_INVALID_EVENT)
{
*handle = WSACreateEvent();
if (*handle == WSA_INVALID_EVENT)
elog(ERROR, "failed to create event for socket: error code %u",
WSAGetLastError());
}
if (WSAEventSelect(event->fd, *handle, flags) != 0)
elog(ERROR, "failed to set up event for socket: error code %u",
WSAGetLastError());
Assert(event->fd != PGINVALID_SOCKET);
}
}
#endif
/*
* Wait for events added to the set to happen, or until the timeout is
* reached. At most nevents occurred events are returned.
*
* If timeout = -1, block until an event occurs; if 0, check sockets for
* readiness, but don't block; if > 0, block for at most timeout milliseconds.
*
 * Returns the number of events that occurred, or 0 if the timeout was reached.
*
* Returned events will have the fd, pos, user_data fields set to the
* values associated with the registered event.
*/
int
WaitEventSetWait(WaitEventSet *set, long timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info)
{
int returned_events = 0;
instr_time start_time;
instr_time cur_time;
long cur_timeout = -1;
Assert(nevents > 0);
/*
* Initialize timeout if requested. We must record the current time so
* that we can determine the remaining timeout if interrupted.
*/
if (timeout >= 0)
{
INSTR_TIME_SET_CURRENT(start_time);
Assert(timeout >= 0 && timeout <= INT_MAX);
cur_timeout = timeout;
}
pgstat_report_wait_start(wait_event_info);
#ifndef WIN32
waiting = true;
#else
/* Ensure that signals are serviced even if latch is already set */
pgwin32_dispatch_queued_signals();
#endif
while (returned_events == 0)
{
int rc;
/*
* Check if the latch is set already. If so, leave the loop
		 * immediately to avoid blocking again. We don't attempt to report any
* other events that might also be satisfied.
*
* If someone sets the latch between this and the
* WaitEventSetWaitBlock() below, the setter will write a byte to the
* pipe (or signal us and the signal handler will do that), and the
* readiness routine will return immediately.
*
		 * On unix, if there's a pending byte in the self-pipe, we'll notice
		 * it whenever we block. Clearing the pipe only in that case avoids
		 * having to drain it every time WaitLatchOrSocket() is used. Should
* the pipe-buffer fill up we're still ok, because the pipe is in
* nonblocking mode. It's unlikely for that to happen, because the
* self pipe isn't filled unless we're blocking (waiting = true), or
* from inside a signal handler in latch_sigusr1_handler().
*
* On windows, we'll also notice if there's a pending event for the
* latch when blocking, but there's no danger of anything filling up,
* as "Setting an event that is already set has no effect.".
*
* Note: we assume that the kernel calls involved in latch management
* will provide adequate synchronization on machines with weak memory
* ordering, so that we cannot miss seeing is_set if a notification
* has already been queued.
*/
if (set->latch && set->latch->is_set)
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->pos = set->latch_pos;
occurred_events->user_data =
set->events[set->latch_pos].user_data;
occurred_events->events = WL_LATCH_SET;
occurred_events++;
returned_events++;
break;
}
/*
* Wait for events using the readiness primitive chosen at the top of
		 * this file. If -1 is returned, a timeout has occurred; if 0, we have
		 * to retry; everything >= 1 is the number of returned events.
*/
rc = WaitEventSetWaitBlock(set, cur_timeout,
occurred_events, nevents);
if (rc == -1)
break; /* timeout occurred */
else
returned_events = rc;
/* If we're not done, update cur_timeout for next iteration */
if (returned_events == 0 && timeout >= 0)
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
if (cur_timeout <= 0)
break;
}
}
#ifndef WIN32
waiting = false;
#endif
pgstat_report_wait_end();
return returned_events;
}
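/*
 * Illustrative wait-loop sketch (editorial addition), collecting one
 * event at a time with a one-second timeout. PG_WAIT_EXTENSION is assumed
 * as the reported wait_event_info; any valid value would do.
 *
 *		WaitEvent	event;
 *		int			nready;
 *
 *		nready = WaitEventSetWait(set, 1000L, &event, 1, PG_WAIT_EXTENSION);
 *		if (nready == 0)
 *			... timeout, nothing ready ...
 *		else if (event.events & WL_LATCH_SET)
 *			ResetLatch(MyLatch);
 *		else if (event.events & WL_POSTMASTER_DEATH)
 *			proc_exit(1);
 *		else if (event.events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
 *			... event.fd is ready; event.user_data identifies it ...
 */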
#if defined(WAIT_USE_EPOLL)
/*
* Wait using linux's epoll_wait(2).
*
 * This is the preferable wait method, as several readiness notifications are
 * delivered without having to iterate through all of set->events. The
 * returned epoll_event structs contain a pointer to our events, making
 * association easy.
*/
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
WaitEvent *occurred_events, int nevents)
{
int returned_events = 0;
int rc;
WaitEvent *cur_event;
struct epoll_event *cur_epoll_event;
/* Sleep */
rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
nevents, cur_timeout);
/* Check return code */
if (rc < 0)
{
/* EINTR is okay, otherwise complain */
if (errno != EINTR)
{
waiting = false;
ereport(ERROR,
(errcode_for_socket_access(),
errmsg("epoll_wait() failed: %m")));
}
return 0;
}
else if (rc == 0)
{
/* timeout exceeded */
return -1;
}
/*
* At least one event occurred, iterate over the returned epoll events
* until they're either all processed, or we've returned all the events
* the caller desired.
*/
for (cur_epoll_event = set->epoll_ret_events;
cur_epoll_event < (set->epoll_ret_events + rc) &&
returned_events < nevents;
cur_epoll_event++)
{
/* epoll's data pointer is set to the associated WaitEvent */
cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
occurred_events->pos = cur_event->pos;
occurred_events->user_data = cur_event->user_data;
occurred_events->events = 0;
if (cur_event->events == WL_LATCH_SET &&
cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
{
/* There's data in the self-pipe, clear it. */
drainSelfPipe();
if (set->latch->is_set)
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->events = WL_LATCH_SET;
occurred_events++;
returned_events++;
}
}
else if (cur_event->events == WL_POSTMASTER_DEATH &&
cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
{
/*
* We expect an EPOLLHUP when the remote end is closed, but
* because we don't expect the pipe to become readable or to have
* any errors either, treat those cases as postmaster death, too.
*
* Be paranoid about a spurious event signalling the postmaster as
* being dead. There have been reports about that happening with
* older primitives (select(2) to be specific), and a spurious
* WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
* cost much.
*/
if (!PostmasterIsAlive())
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->events = WL_POSTMASTER_DEATH;
occurred_events++;
returned_events++;
}
}
else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
{
Assert(cur_event->fd != PGINVALID_SOCKET);
if ((cur_event->events & WL_SOCKET_READABLE) &&
(cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
{
/* data available in socket, or EOF */
occurred_events->events |= WL_SOCKET_READABLE;
}
if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
(cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
{
/* writable, or EOF */
occurred_events->events |= WL_SOCKET_WRITEABLE;
}
if (occurred_events->events != 0)
{
occurred_events->fd = cur_event->fd;
occurred_events++;
returned_events++;
}
}
}
return returned_events;
}
#elif defined(WAIT_USE_POLL)
/*
* Wait using poll(2).
*
 * This allows receiving readiness notifications for several events at once,
* but requires iterating through all of set->pollfds.
*/
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
WaitEvent *occurred_events, int nevents)
{
int returned_events = 0;
int rc;
WaitEvent *cur_event;
struct pollfd *cur_pollfd;
/* Sleep */
rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
/* Check return code */
if (rc < 0)
{
/* EINTR is okay, otherwise complain */
if (errno != EINTR)
{
waiting = false;
ereport(ERROR,
(errcode_for_socket_access(),
errmsg("poll() failed: %m")));
}
return 0;
}
else if (rc == 0)
{
/* timeout exceeded */
return -1;
}
for (cur_event = set->events, cur_pollfd = set->pollfds;
cur_event < (set->events + set->nevents) &&
returned_events < nevents;
cur_event++, cur_pollfd++)
{
/* no activity on this FD, skip */
if (cur_pollfd->revents == 0)
continue;
occurred_events->pos = cur_event->pos;
occurred_events->user_data = cur_event->user_data;
occurred_events->events = 0;
if (cur_event->events == WL_LATCH_SET &&
(cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
{
/* There's data in the self-pipe, clear it. */
drainSelfPipe();
if (set->latch->is_set)
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->events = WL_LATCH_SET;
occurred_events++;
returned_events++;
}
}
else if (cur_event->events == WL_POSTMASTER_DEATH &&
(cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
{
/*
			 * We expect a POLLHUP when the remote end is closed, but because
* we don't expect the pipe to become readable or to have any
* errors either, treat those cases as postmaster death, too.
*
* Be paranoid about a spurious event signalling the postmaster as
* being dead. There have been reports about that happening with
* older primitives (select(2) to be specific), and a spurious
* WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
* cost much.
*/
if (!PostmasterIsAlive())
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->events = WL_POSTMASTER_DEATH;
occurred_events++;
returned_events++;
}
}
else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
{
int errflags = POLLHUP | POLLERR | POLLNVAL;
Assert(cur_event->fd >= PGINVALID_SOCKET);
if ((cur_event->events & WL_SOCKET_READABLE) &&
(cur_pollfd->revents & (POLLIN | errflags)))
{
/* data available in socket, or EOF */
occurred_events->events |= WL_SOCKET_READABLE;
}
if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
(cur_pollfd->revents & (POLLOUT | errflags)))
{
/* writeable, or EOF */
occurred_events->events |= WL_SOCKET_WRITEABLE;
}
if (occurred_events->events != 0)
{
occurred_events->fd = cur_event->fd;
occurred_events++;
returned_events++;
}
}
}
return returned_events;
}
#elif defined(WAIT_USE_WIN32)
/*
* Wait using Windows' WaitForMultipleObjects().
*
* Unfortunately this will only ever return a single readiness notification at
* a time. Note that while the official documentation for
* WaitForMultipleObjects is ambiguous about multiple events being "consumed"
* with a single bWaitAll = FALSE call,
* https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273 confirms
* that only one event is "consumed".
*/
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
WaitEvent *occurred_events, int nevents)
{
int returned_events = 0;
DWORD rc;
WaitEvent *cur_event;
/* Reset any wait events that need it */
for (cur_event = set->events;
cur_event < (set->events + set->nevents);
cur_event++)
{
if (cur_event->reset)
{
WaitEventAdjustWin32(set, cur_event);
cur_event->reset = false;
}
/*
* Windows does not guarantee to log an FD_WRITE network event
* indicating that more data can be sent unless the previous send()
* failed with WSAEWOULDBLOCK. While our caller might well have made
* such a call, we cannot assume that here. Therefore, if waiting for
* write-ready, force the issue by doing a dummy send(). If the dummy
* send() succeeds, assume that the socket is in fact write-ready, and
* return immediately. Also, if it fails with something other than
* WSAEWOULDBLOCK, return a write-ready indication to let our caller
* deal with the error condition.
*/
if (cur_event->events & WL_SOCKET_WRITEABLE)
{
char c;
WSABUF buf;
DWORD sent;
int r;
buf.buf = &c;
buf.len = 0;
r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
{
occurred_events->pos = cur_event->pos;
occurred_events->user_data = cur_event->user_data;
occurred_events->events = WL_SOCKET_WRITEABLE;
occurred_events->fd = cur_event->fd;
return 1;
}
}
}
/*
* Sleep.
*
* Need to wait for ->nevents + 1, because signal handle is in [0].
*/
rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
cur_timeout);
/* Check return code */
if (rc == WAIT_FAILED)
elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
GetLastError());
else if (rc == WAIT_TIMEOUT)
{
/* timeout exceeded */
return -1;
}
if (rc == WAIT_OBJECT_0)
{
/* Service newly-arrived signals */
pgwin32_dispatch_queued_signals();
return 0; /* retry */
}
/*
* With an offset of one, due to the always present pgwin32_signal_event,
* the handle offset directly corresponds to a wait event.
*/
cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
occurred_events->pos = cur_event->pos;
occurred_events->user_data = cur_event->user_data;
occurred_events->events = 0;
if (cur_event->events == WL_LATCH_SET)
{
if (!ResetEvent(set->latch->event))
elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
if (set->latch->is_set)
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->events = WL_LATCH_SET;
occurred_events++;
returned_events++;
}
}
else if (cur_event->events == WL_POSTMASTER_DEATH)
{
/*
* Postmaster apparently died. Since the consequences of falsely
* returning WL_POSTMASTER_DEATH could be pretty unpleasant, we take
* the trouble to positively verify this with PostmasterIsAlive(),
* even though there is no known reason to think that the event could
* be falsely set on Windows.
*/
if (!PostmasterIsAlive())
{
occurred_events->fd = PGINVALID_SOCKET;
occurred_events->events = WL_POSTMASTER_DEATH;
occurred_events++;
returned_events++;
}
}
else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
{
WSANETWORKEVENTS resEvents;
HANDLE handle = set->handles[cur_event->pos + 1];
Assert(cur_event->fd);
occurred_events->fd = cur_event->fd;
ZeroMemory(&resEvents, sizeof(resEvents));
if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
elog(ERROR, "failed to enumerate network events: error code %u",
WSAGetLastError());
if ((cur_event->events & WL_SOCKET_READABLE) &&
(resEvents.lNetworkEvents & FD_READ))
{
/* data available in socket */
occurred_events->events |= WL_SOCKET_READABLE;
/*------
* WaitForMultipleObjects doesn't guarantee that a read event will
* be returned if the latch is set at the same time. Even if it
* did, the caller might drop that event expecting it to reoccur
* on next call. So, we must force the event to be reset if this
* WaitEventSet is used again in order to avoid an indefinite
* hang. Refer https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
* for the behavior of socket events.
*------
*/
cur_event->reset = true;
}
if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
(resEvents.lNetworkEvents & FD_WRITE))
{
/* writeable */
occurred_events->events |= WL_SOCKET_WRITEABLE;
}
if (resEvents.lNetworkEvents & FD_CLOSE)
{
/* EOF */
if (cur_event->events & WL_SOCKET_READABLE)
occurred_events->events |= WL_SOCKET_READABLE;
if (cur_event->events & WL_SOCKET_WRITEABLE)
occurred_events->events |= WL_SOCKET_WRITEABLE;
}
if (occurred_events->events != 0)
{
occurred_events++;
returned_events++;
}
}
return returned_events;
}
#endif
/*
* SetLatch uses SIGUSR1 to wake up the process waiting on the latch.
*
* Wake up WaitLatch, if we're waiting. (We might not be, since SIGUSR1 is
* overloaded for multiple purposes; or we might not have reached WaitLatch
* yet, in which case we don't need to fill the pipe either.)
*
* NB: when calling this in a signal handler, be sure to save and restore
* errno around it.
*/
#ifndef WIN32
void
latch_sigusr1_handler(void)
{
if (waiting)
sendSelfPipeByte();
}
#endif /* !WIN32 */
/* Send one byte to the self-pipe, to wake up WaitLatch */
#ifndef WIN32
static void
sendSelfPipeByte(void)
{
int rc;
char dummy = 0;
retry:
rc = write(selfpipe_writefd, &dummy, 1);
if (rc < 0)
{
/* If interrupted by signal, just retry */
if (errno == EINTR)
goto retry;
		/*
		 * If the pipe is full, we don't need to retry; the data that's there
		 * already is enough to wake up WaitLatch.
		 */
if (errno == EAGAIN || errno == EWOULDBLOCK)
return;
		/*
		 * Oops, the write() failed for some other reason. We might be in a
		 * signal handler, so it's not safe to elog(). We have no choice but
		 * to silently ignore the error.
		 */
return;
}
}
#endif /* !WIN32 */
/*
* Read all available data from the self-pipe
*
* Note: this is only called when waiting = true. If it fails and doesn't
* return, it must reset that flag first (though ideally, this will never
* happen).
*/
#ifndef WIN32
static void
drainSelfPipe(void)
{
/*
* There shouldn't normally be more than one byte in the pipe, or maybe a
* few bytes if multiple processes run SetLatch at the same instant.
*/
char buf[16];
int rc;
for (;;)
{
rc = read(selfpipe_readfd, buf, sizeof(buf));
if (rc < 0)
{
if (errno == EAGAIN || errno == EWOULDBLOCK)
break; /* the pipe is empty */
else if (errno == EINTR)
continue; /* retry */
else
{
waiting = false;
elog(ERROR, "read() on self-pipe failed: %m");
}
}
else if (rc == 0)
{
waiting = false;
elog(ERROR, "unexpected EOF on self-pipe");
}
else if (rc < sizeof(buf))
{
/* we successfully drained the pipe; no need to read() again */
break;
}
/* else buffer wasn't big enough, so read again */
}
}
#endif /* !WIN32 */
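/*
 * Standalone sketch of the self-pipe trick used above (editorial
 * addition; plain POSIX, error handling elided). Both ends are made
 * non-blocking, so a full pipe never stalls the waker and draining never
 * blocks the waiter:
 *
 *		int		fds[2];
 *
 *		pipe(fds);
 *		fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_NONBLOCK);
 *		fcntl(fds[1], F_SETFL, fcntl(fds[1], F_GETFL) | O_NONBLOCK);
 *
 *		... waker, possibly inside a signal handler ...
 *		write(fds[1], "x", 1);
 *
 *		... waiter: include fds[0] in the poll() set, then drain ...
 *		char	buf[16];
 *		while (read(fds[0], buf, sizeof(buf)) > 0)
 *			;
 */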