1996-07-09 08:22:35 +02:00
|
|
|
/*-------------------------------------------------------------------------
 *
 * libpq.h
 *	  POSTGRES LIBPQ buffer structure definitions.
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/libpq/libpq.h
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
#ifndef LIBPQ_H
|
|
|
|
#define LIBPQ_H
|
|
|
|
|
1998-01-26 02:42:53 +01:00
|
|
|
#include <netinet/in.h>
|
|
|
|
|
1999-08-31 06:26:40 +02:00
|
|
|
#include "lib/stringinfo.h"
|
1998-01-26 02:42:53 +01:00
|
|
|
#include "libpq/libpq-be.h"
|
Introduce WaitEventSet API.
Commit ac1d794 ("Make idle backends exit if the postmaster dies.")
introduced a regression on, at least, large linux systems. Constantly
adding the same postmaster_alive_fds to the OSs internal datastructures
for implementing poll/select can cause significant contention; leading
to a performance regression of nearly 3x in one example.
This can be avoided by using e.g. linux' epoll, which avoids having to
add/remove file descriptors to the wait datastructures at a high rate.
Unfortunately the current latch interface makes it hard to allocate any
persistent per-backend resources.
Replace, with a backward compatibility layer, WaitLatchOrSocket with a
new WaitEventSet API. Users can allocate such a Set across multiple
calls, and add more than one file-descriptor to wait on. The latter has
been added because there's upcoming postgres features where that will be
helpful.
In addition to the previously existing poll(2), select(2),
WaitForMultipleObjects() implementations also provide an epoll_wait(2)
based implementation to address the aforementioned performance
problem. Epoll is only available on linux, but that is the most likely
OS for machines large enough (four sockets) to reproduce the problem.
To actually address the aforementioned regression, create and use a
long-lived WaitEventSet for FE/BE communication. There are additional
places that would benefit from a long-lived set, but that's a task for
another day.
Thanks to Amit Kapila, who helped make the windows code I blindly wrote
actually work.
Reported-By: Dmitry Vasilyev Discussion:
CAB-SwXZh44_2ybvS5Z67p_CDz=XFn4hNAD=CnMEF+QqkXwFrGg@mail.gmail.com
20160114143931.GG10941@awork2.anarazel.de
2016-03-21 09:56:39 +01:00
|
|
|
#include "storage/latch.h"
|
1996-10-23 09:42:13 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
|
Add heuristic incoming-message-size limits in the server.
We had a report of confusing server behavior caused by a client bug
that sent junk to the server: the server thought the junk was a
very long message length and waited patiently for data that would
never come. We can reduce the risk of that by being less trusting
about message lengths.
For a long time, libpq has had a heuristic rule that it wouldn't
believe large message size words, except for a small number of
message types that are expected to be (potentially) long. This
provides some defense against loss of message-boundary sync and
other corrupted-data cases. The server does something similar,
except that up to now it only limited the lengths of messages
received during the connection authentication phase. Let's
do the same as in libpq and put restrictions on the allowed
length of all messages, while distinguishing between message
types that are expected to be long and those that aren't.
I used a limit of 10000 bytes for non-long messages. (libpq's
corresponding limit is 30000 bytes, but given the asymmetry of
the FE/BE protocol, there's no good reason why the numbers should
be the same.) Experimentation suggests that this is at least a
factor of 10, maybe a factor of 100, more than we really need;
but plenty of daylight seems desirable to avoid false positives.
In any case we can adjust the limit based on beta-test results.
For long messages, set a limit of MaxAllocSize - 1, which is the
most that we can absorb into the StringInfo buffer that the message
is collected in. This just serves to make sure that a bogus message
size is reported as such, rather than as a confusing gripe about
not being able to enlarge a string buffer.
While at it, make sure that non-mainline code paths (such as
COPY FROM STDIN) are as paranoid as SocketBackend is, and validate
the message type code before believing the message length.
This provides an additional guard against getting stuck on corrupted
input.
Discussion: https://postgr.es/m/2003757.1619373089@sss.pgh.pa.us
2021-04-28 21:50:42 +02:00
|
|
|
/*
 * Maximum expected message sizes for pq_getmessage().
 *
 * Every caller of pq_getmessage() must pass a maximum expected message
 * size.  Unless there is a specific reason to choose some other value,
 * the convention is: PQ_SMALL_MESSAGE_LIMIT for message types that should
 * never be very long, and PQ_LARGE_MESSAGE_LIMIT for message types that
 * may legitimately be long.
 */
#define PQ_SMALL_MESSAGE_LIMIT	10000
#define PQ_LARGE_MESSAGE_LIMIT	(MaxAllocSize - 1)
|
|
|
|
|
2014-10-31 17:02:40 +01:00
|
|
|
/*
 * PQcommMethods: table of function pointers implementing the low-level
 * frontend/backend communication operations.  The active table is
 * selected via the PqCommMethods pointer; callers normally go through
 * the pq_* convenience macros defined below rather than using this
 * struct directly.
 */
typedef struct
{
	/* reset communication state */
	void		(*comm_reset) (void);
	/* flush pending output; returns 0 on success (see implementation) */
	int			(*flush) (void);
	/* flush only if it can be done without blocking */
	int			(*flush_if_writable) (void);
	/* is there output data not yet sent? */
	bool		(*is_send_pending) (void);
	/* send a message of type msgtype with body s/len */
	int			(*putmessage) (char msgtype, const char *s, size_t len);
	/* as putmessage, but never blocks */
	void		(*putmessage_noblock) (char msgtype, const char *s, size_t len);
} PQcommMethods;
|
|
|
|
|
2018-10-16 05:45:30 +02:00
|
|
|
extern const PGDLLIMPORT PQcommMethods *PqCommMethods;

/*
 * Convenience macros that dispatch through the currently active
 * PQcommMethods table.  These are the interfaces the rest of the
 * backend should use.
 */
#define pq_comm_reset() (PqCommMethods->comm_reset())
#define pq_flush() (PqCommMethods->flush())
#define pq_flush_if_writable() (PqCommMethods->flush_if_writable())
#define pq_is_send_pending() (PqCommMethods->is_send_pending())
#define pq_putmessage(msgtype, s, len) \
	(PqCommMethods->putmessage(msgtype, s, len))
#define pq_putmessage_noblock(msgtype, s, len) \
	(PqCommMethods->putmessage_noblock(msgtype, s, len))
|
2014-10-31 17:02:40 +01:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
|
|
|
* External functions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* prototypes for functions in pqcomm.c
|
|
|
|
*/
|
2019-06-09 04:33:52 +02:00
|
|
|
extern WaitEventSet *FeBeWaitSet;
|
|
|
|
|
2021-03-01 03:16:56 +01:00
|
|
|
#define FeBeWaitSetSocketPos 0
|
|
|
|
#define FeBeWaitSetLatchPos 1
|
|
|
|
|
2020-01-31 11:50:32 +01:00
|
|
|
extern int StreamServerPort(int family, const char *hostName,
|
|
|
|
unsigned short portNumber, const char *unixSocketDir,
|
2012-08-10 23:26:44 +02:00
|
|
|
pgsocket ListenSocket[], int MaxListen);
|
2010-01-10 15:16:08 +01:00
|
|
|
extern int StreamConnection(pgsocket server_fd, Port *port);
|
|
|
|
extern void StreamClose(pgsocket sock);
|
2012-08-10 23:26:44 +02:00
|
|
|
extern void TouchSocketFiles(void);
|
2015-08-02 20:54:44 +02:00
|
|
|
extern void RemoveSocketFiles(void);
|
1999-04-25 05:19:27 +02:00
|
|
|
extern void pq_init(void);
|
|
|
|
extern int pq_getbytes(char *s, size_t len);
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 16:08:45 +01:00
|
|
|
extern void pq_startmsgread(void);
|
|
|
|
extern void pq_endmsgread(void);
|
|
|
|
extern bool pq_is_reading_msg(void);
|
2003-04-19 02:02:30 +02:00
|
|
|
extern int pq_getmessage(StringInfo s, int maxlen);
|
2001-12-04 20:40:17 +01:00
|
|
|
extern int pq_getbyte(void);
|
1999-04-25 05:19:27 +02:00
|
|
|
extern int pq_peekbyte(void);
|
2010-01-15 10:19:10 +01:00
|
|
|
extern int pq_getbyte_if_available(unsigned char *c);
|
2021-11-08 17:01:43 +01:00
|
|
|
extern bool pq_buffer_has_data(void);
|
2021-03-04 09:45:55 +01:00
|
|
|
extern int pq_putmessage_v2(char msgtype, const char *s, size_t len);
|
2021-04-02 21:52:30 +02:00
|
|
|
extern bool pq_check_connection(void);
|
1999-01-23 23:27:29 +01:00
|
|
|
|
2003-04-19 02:02:30 +02:00
|
|
|
/*
|
|
|
|
* prototypes for functions in be-secure.c
|
|
|
|
*/
|
2018-06-26 10:19:35 +02:00
|
|
|
extern char *ssl_library;
|
2012-02-22 22:40:46 +01:00
|
|
|
extern char *ssl_cert_file;
|
|
|
|
extern char *ssl_key_file;
|
|
|
|
extern char *ssl_ca_file;
|
|
|
|
extern char *ssl_crl_file;
|
2021-02-18 07:59:10 +01:00
|
|
|
extern char *ssl_crl_dir;
|
Always use 2048 bit DH parameters for OpenSSL ephemeral DH ciphers.
1024 bits is considered weak these days, but OpenSSL always passes 1024 as
the key length to the tmp_dh callback. All the code to handle other key
lengths is, in fact, dead.
To remedy those issues:
* Only include hard-coded 2048-bit parameters.
* Set the parameters directly with SSL_CTX_set_tmp_dh(), without the
callback
* The name of the file containing the DH parameters is now a GUC. This
replaces the old hardcoded "dh1024.pem" filename. (The files for other
key lengths, dh512.pem, dh2048.pem, etc. were never actually used.)
This is not a new problem, but it doesn't seem worth the risk and churn to
backport. If you care enough about the strength of the DH parameters on
old versions, you can create custom DH parameters, with as many bits as you
wish, and put them in the "dh1024.pem" file.
Per report by Nicolas Guini and Damian Quiroga. Reviewed by Michael Paquier.
Discussion: https://www.postgresql.org/message-id/CAMxBoUyjOOautVozN6ofzym828aNrDjuCcOTcCquxjwS-L2hGQ@mail.gmail.com
2017-07-31 21:36:09 +02:00
|
|
|
extern char *ssl_dh_params_file;
|
2020-03-26 00:37:30 +01:00
|
|
|
extern PGDLLIMPORT char *ssl_passphrase_command;
|
|
|
|
extern PGDLLIMPORT bool ssl_passphrase_command_supports_reload;
|
2019-06-09 04:33:52 +02:00
|
|
|
#ifdef USE_SSL
|
|
|
|
extern bool ssl_loaded_verify_locations;
|
|
|
|
#endif
|
2012-02-22 22:40:46 +01:00
|
|
|
|
2017-01-04 18:43:52 +01:00
|
|
|
extern int secure_initialize(bool isServerStart);
|
2008-11-20 10:29:36 +01:00
|
|
|
extern bool secure_loaded_verify_locations(void);
|
2003-04-19 02:02:30 +02:00
|
|
|
extern void secure_destroy(void);
|
|
|
|
extern int secure_open_server(Port *port);
|
|
|
|
extern void secure_close(Port *port);
|
|
|
|
extern ssize_t secure_read(Port *port, void *ptr, size_t len);
|
|
|
|
extern ssize_t secure_write(Port *port, void *ptr, size_t len);
|
Break out OpenSSL-specific code to separate files.
This refactoring is in preparation for adding support for other SSL
implementations, with no user-visible effects. There are now two #defines,
USE_OPENSSL which is defined when building with OpenSSL, and USE_SSL which
is defined when building with any SSL implementation. Currently, OpenSSL is
the only implementation so the two #defines go together, but USE_SSL is
supposed to be used for implementation-independent code.
The libpq SSL code is changed to use a custom BIO, which does all the raw
I/O, like we've been doing in the backend for a long time. That makes it
possible to use MSG_NOSIGNAL to block SIGPIPE when using SSL, which avoids
a couple of syscall for each send(). Probably doesn't make much performance
difference in practice - the SSL encryption is expensive enough to mask the
effect - but it was a natural result of this refactoring.
Based on a patch by Martijn van Oosterhout from 2006. Briefly reviewed by
Alvaro Herrera, Andreas Karlsson, Jeff Janes.
2014-08-11 10:54:19 +02:00
|
|
|
extern ssize_t secure_raw_read(Port *port, void *ptr, size_t len);
|
|
|
|
extern ssize_t secure_raw_write(Port *port, const void *ptr, size_t len);
|
2019-06-09 04:33:52 +02:00
|
|
|
|
|
|
|
/*
 * prototypes for functions in be-secure-gssapi.c
 */
#ifdef ENABLE_GSS
extern ssize_t secure_open_gssapi(Port *port);
#endif
|
Break out OpenSSL-specific code to separate files.
This refactoring is in preparation for adding support for other SSL
implementations, with no user-visible effects. There are now two #defines,
USE_OPENSSL which is defined when building with OpenSSL, and USE_SSL which
is defined when building with any SSL implementation. Currently, OpenSSL is
the only implementation so the two #defines go together, but USE_SSL is
supposed to be used for implementation-independent code.
The libpq SSL code is changed to use a custom BIO, which does all the raw
I/O, like we've been doing in the backend for a long time. That makes it
possible to use MSG_NOSIGNAL to block SIGPIPE when using SSL, which avoids
a couple of syscall for each send(). Probably doesn't make much performance
difference in practice - the SSL encryption is expensive enough to mask the
effect - but it was a natural result of this refactoring.
Based on a patch by Martijn van Oosterhout from 2006. Briefly reviewed by
Alvaro Herrera, Andreas Karlsson, Jeff Janes.
2014-08-11 10:54:19 +02:00
|
|
|
|
|
|
|
/* GUCs controlling SSL protocol and cipher selection */
extern char *SSLCipherSuites;
extern char *SSLECDHCurve;
extern bool SSLPreferServerCiphers;
extern int	ssl_min_protocol_version;
extern int	ssl_max_protocol_version;
|
|
|
|
|
|
|
|
/*
 * Allowed values for the ssl_min_protocol_version and
 * ssl_max_protocol_version GUCs.  PG_TLS_ANY (0) means no restriction.
 */
enum ssl_protocol_versions
{
	PG_TLS_ANY = 0,
	PG_TLS1_VERSION,
	PG_TLS1_1_VERSION,
	PG_TLS1_2_VERSION,
	PG_TLS1_3_VERSION,
};
|
2003-04-19 02:02:30 +02:00
|
|
|
|
2018-02-26 19:28:38 +01:00
|
|
|
/*
 * prototypes for functions in be-secure-common.c
 */
extern int	run_ssl_passphrase_command(const char *prompt, bool is_server_start,
									   char *buf, int size);
extern bool check_ssl_key_file_permissions(const char *ssl_key_file,
										   bool isServerStart);
|
2018-02-26 19:28:38 +01:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
#endif /* LIBPQ_H */
|